
Add patches for v5.11

Links:
 - kernel: https://github.com/linux-surface/kernel/commit/7978404d21d71c2de05e9ba79432294514df485a
 - SAM: https://github.com/linux-surface/surface-aggregator-module/commit/bee2add45fb668a794406ce6a648cece4fd00d51
 - SAM-gen4: https://github.com/linux-surface/surface-aggregator-module-gen4/commit/e321205faaf5f39675ccb8c2314b171c2319b9bc
 - GPE: https://github.com/linux-surface/surface-gpe/commit/6ecfdb39050129bf17e5f1fff784e4df7aa56171
 - Hotplug: https://github.com/linux-surface/surface-hotplug/commit/595ed62f24417b2ac97f4658cfc30776c9b888cf
 - IPTS: https://github.com/linux-surface/intel-precise-touch/commit/3642d0e4ebf98ded318cf4bcbc79c90c449b321f
Author: Maximilian Luz (commit 51a06b0112)
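
The patch files added below are standard git format-patch output, so they can be applied on top of a matching v5.11 source tree. A minimal sketch, assuming the kernel tree is checked out at the corresponding v5.11 tag and this repository's layout:

    # apply the whole 5.11 patch series in order (paths are assumptions)
    git am patches/5.11/*.patch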

+ 60 - 0
configs/surface-5.11.config

@@ -0,0 +1,60 @@
+#
+# Surface Aggregator Module
+#
+CONFIG_SURFACE_AGGREGATOR=m
+CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION=n
+CONFIG_SURFACE_AGGREGATOR_BUS=y
+CONFIG_SURFACE_AGGREGATOR_CDEV=m
+CONFIG_SURFACE_AGGREGATOR_REGISTRY=m
+
+CONFIG_SURFACE_ACPI_NOTIFY=m
+CONFIG_SURFACE_DTX=m
+CONFIG_SURFACE_PERFMODE=m
+
+CONFIG_SURFACE_HID=m
+CONFIG_SURFACE_KBD=m
+
+CONFIG_BATTERY_SURFACE=m
+CONFIG_CHARGER_SURFACE=m
+
+#
+# Surface Hotplug
+#
+CONFIG_SURFACE_HOTPLUG=m
+
+#
+# IPTS touchscreen
+#
+# This only enables the user interface for IPTS data.
+# For the touchscreen to work, you need to install iptsd.
+#
+CONFIG_MISC_IPTS=m
+
+#
+# Cameras: IPU3
+#
+CONFIG_VIDEO_IPU3_IMGU=m
+CONFIG_VIDEO_IPU3_CIO2=m
+CONFIG_CIO2_BRIDGE=y
+CONFIG_INTEL_SKL_INT3472=m
+
+#
+# Cameras: Sensor drivers
+#
+CONFIG_VIDEO_OV5693=m
+CONFIG_VIDEO_OV8865=m
+
+#
+# ALS Sensor for Surface Book 3, Surface Laptop 3, Surface Pro 7
+#
+CONFIG_APDS9960=m
+
+#
+# Other Drivers
+#
+CONFIG_INPUT_SOC_BUTTON_ARRAY=m
+CONFIG_SURFACE_3_BUTTON=m
+CONFIG_SURFACE_3_POWER_OPREGION=m
+CONFIG_SURFACE_PRO3_BUTTON=m
+CONFIG_SURFACE_GPE=m
+CONFIG_SURFACE_BOOK1_DGPU_SWITCH=m
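
To use this fragment, merge it into the kernel configuration before building. A minimal sketch, assuming a kernel source tree with the usual merge_config.sh helper and the fragment saved as configs/surface-5.11.config (paths are assumptions):

    # fold the Surface options into an existing .config
    ./scripts/kconfig/merge_config.sh -m .config configs/surface-5.11.config
    # resolve newly introduced dependencies with their defaults
    make olddefconfig

Note that options provided by the patches below (e.g. CONFIG_SURFACE_AGGREGATOR, CONFIG_MISC_IPTS) only exist once those patches are applied.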

+ 101 - 0
patches/5.11/0001-surface3-oemb.patch

@@ -0,0 +1,101 @@
+From be52a2ec3db3f16d7408b40fdaa17b8d066725b3 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 18 Oct 2020 16:42:44 +0900
+Subject: [PATCH] (surface3-oemb) add DMI matches for Surface 3 with broken DMI
+ table
+
+On some Surface 3 units, the DMI table gets corrupted for unknown reasons
+and breaks existing DMI matching used for device-specific quirks.
+
+This commit adds the (broken) DMI data into dmi_system_id tables used
+for quirks so that each driver can enable quirks even on the affected
+systems.
+
+On affected systems, DMI data will look like this:
+    $ grep . /sys/devices/virtual/dmi/id/{bios_vendor,board_name,board_vendor,\
+    chassis_vendor,product_name,sys_vendor}
+    /sys/devices/virtual/dmi/id/bios_vendor:American Megatrends Inc.
+    /sys/devices/virtual/dmi/id/board_name:OEMB
+    /sys/devices/virtual/dmi/id/board_vendor:OEMB
+    /sys/devices/virtual/dmi/id/chassis_vendor:OEMB
+    /sys/devices/virtual/dmi/id/product_name:OEMB
+    /sys/devices/virtual/dmi/id/sys_vendor:OEMB
+
+Expected:
+    $ grep . /sys/devices/virtual/dmi/id/{bios_vendor,board_name,board_vendor,\
+    chassis_vendor,product_name,sys_vendor}
+    /sys/devices/virtual/dmi/id/bios_vendor:American Megatrends Inc.
+    /sys/devices/virtual/dmi/id/board_name:Surface 3
+    /sys/devices/virtual/dmi/id/board_vendor:Microsoft Corporation
+    /sys/devices/virtual/dmi/id/chassis_vendor:Microsoft Corporation
+    /sys/devices/virtual/dmi/id/product_name:Surface 3
+    /sys/devices/virtual/dmi/id/sys_vendor:Microsoft Corporation
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: surface3-oemb
+---
+ drivers/platform/surface/surface3-wmi.c           | 7 +++++++
+ sound/soc/codecs/rt5645.c                         | 9 +++++++++
+ sound/soc/intel/common/soc-acpi-intel-cht-match.c | 8 ++++++++
+ 3 files changed, 24 insertions(+)
+
+diff --git a/drivers/platform/surface/surface3-wmi.c b/drivers/platform/surface/surface3-wmi.c
+index 130b6f52a600..801083aa56d6 100644
+--- a/drivers/platform/surface/surface3-wmi.c
++++ b/drivers/platform/surface/surface3-wmi.c
+@@ -37,6 +37,13 @@ static const struct dmi_system_id surface3_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 420003d062c7..217e488cd4fa 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3687,6 +3687,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ 		},
+ 		.driver_data = (void *)&intel_braswell_platform_data,
+ 	},
++	{
++		.ident = "Microsoft Surface 3",
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++		.driver_data = (void *)&intel_braswell_platform_data,
++	},
+ 	{
+ 		/*
+ 		 * Match for the GPDwin which unfortunately uses somewhat
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index 2752dc955733..ef36a316e2ed 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -27,6 +27,14 @@ static const struct dmi_system_id cht_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+ 	},
++	{
++		.callback = cht_surface_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++	},
+ 	{ }
+ };
+ 
+-- 
+2.30.1
+

+ 1118 - 0
patches/5.11/0002-wifi.patch

@@ -0,0 +1,1118 @@
+From b639c7ba32929ad309783a293029f831e0e4f1e2 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Mon, 28 Sep 2020 17:46:49 +0900
+Subject: [PATCH] mwifiex: pcie: add DMI-based quirk impl for Surface devices
+
+This commit adds a quirk implementation for Surface devices, based on
+matching against a DMI table.
+
+This implementation can be used for quirks added later.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/Makefile |   1 +
+ drivers/net/wireless/marvell/mwifiex/pcie.c   |   4 +
+ drivers/net/wireless/marvell/mwifiex/pcie.h   |   1 +
+ .../wireless/marvell/mwifiex/pcie_quirks.c    | 114 ++++++++++++++++++
+ .../wireless/marvell/mwifiex/pcie_quirks.h    |  11 ++
+ 5 files changed, 131 insertions(+)
+ create mode 100644 drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+ create mode 100644 drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/Makefile b/drivers/net/wireless/marvell/mwifiex/Makefile
+index 162d557b78af..2bd00f40958e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/Makefile
++++ b/drivers/net/wireless/marvell/mwifiex/Makefile
+@@ -49,6 +49,7 @@ mwifiex_sdio-y += sdio.o
+ obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o
+ 
+ mwifiex_pcie-y += pcie.o
++mwifiex_pcie-y += pcie_quirks.o
+ obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o
+ 
+ mwifiex_usb-y += usb.o
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 5f0a61b974ee..41c71fbea9c1 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -27,6 +27,7 @@
+ #include "wmm.h"
+ #include "11n.h"
+ #include "pcie.h"
++#include "pcie_quirks.h"
+ 
+ #define PCIE_VERSION	"1.0"
+ #define DRV_NAME        "Marvell mwifiex PCIe"
+@@ -410,6 +411,9 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
+ 			return ret;
+ 	}
+ 
++	/* check quirks */
++	mwifiex_initialize_quirks(card);
++
+ 	if (mwifiex_add_card(card, &card->fw_done, &pcie_ops,
+ 			     MWIFIEX_PCIE, &pdev->dev)) {
+ 		pr_err("%s failed\n", __func__);
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
+index 5ed613d65709..981e330c77d7 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
+@@ -244,6 +244,7 @@ struct pcie_service_card {
+ 	unsigned long work_flags;
+ 
+ 	bool pci_reset_ongoing;
++	unsigned long quirks;
+ };
+ 
+ static inline int
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+new file mode 100644
+index 000000000000..929aee2b0a60
+--- /dev/null
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -0,0 +1,114 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * File for PCIe quirks.
++ */
++
++/* The low-level PCI operations will be performed in this file. Therefore,
++ * let's use dev_*() instead of mwifiex_dbg() here to avoid troubles (e.g.
++ * to avoid using mwifiex_adapter struct before init or wifi is powered
++ * down, or causes NULL ptr deref).
++ */
++
++#include <linux/dmi.h>
++
++#include "pcie_quirks.h"
++
++/* quirk table based on DMI matching */
++static const struct dmi_system_id mwifiex_quirk_table[] = {
++	{
++		.ident = "Surface Pro 4",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Pro 5",
++		.matches = {
++			/* match for SKU here due to generic product name "Surface Pro" */
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Pro 5 (LTE)",
++		.matches = {
++			/* match for SKU here due to generic product name "Surface Pro" */
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Pro 6",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Book 1",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Book 2",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Laptop 1",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Laptop 2",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface 3",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Pro 3",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 3"),
++		},
++		.driver_data = 0,
++	},
++	{}
++};
++
++void mwifiex_initialize_quirks(struct pcie_service_card *card)
++{
++	struct pci_dev *pdev = card->dev;
++	const struct dmi_system_id *dmi_id;
++
++	dmi_id = dmi_first_match(mwifiex_quirk_table);
++	if (dmi_id)
++		card->quirks = (uintptr_t)dmi_id->driver_data;
++
++	if (!card->quirks)
++		dev_info(&pdev->dev, "no quirks enabled\n");
++}
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+new file mode 100644
+index 000000000000..5326ae7e5671
+--- /dev/null
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -0,0 +1,11 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Header file for PCIe quirks.
++ */
++
++#include "pcie.h"
++
++/* quirks */
++// quirk flags can be added here
++
++void mwifiex_initialize_quirks(struct pcie_service_card *card);
+-- 
+2.30.1
+
+From e996d2b5ad3ee4b2e6bcbb9483be5f0a67c4765f Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Tue, 29 Sep 2020 17:25:22 +0900
+Subject: [PATCH] mwifiex: pcie: add reset_d3cold quirk for Surface gen4+
+ devices
+
+To reset mwifiex on Surface gen4+ (Pro 4 or later) devices, it seems
+that putting the wifi device into D3cold is required, according to the
+errata.inf file in the Windows installation (Windows/INF/errata.inf).
+
+This patch adds a function that performs a power-cycle (put into D3cold,
+then back to D0) and calls it at the end of reset_prepare().
+
+Note: The parent device (bridge) of the wifi also needs to be reset on
+SB1; this might be because the bridge of the wifi always reports being
+in D3hot. When I tried to reset only the wifi device (without touching
+the parent), the reset failed with the following error:
+
+    acpi device:4b: Cannot transition to power state D0 for parent in D3hot
+    mwifiex_pcie 0000:03:00.0: can't change power state from D3cold to D0 (config space inaccessible)
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c   |  7 ++
+ .../wireless/marvell/mwifiex/pcie_quirks.c    | 73 +++++++++++++++++--
+ .../wireless/marvell/mwifiex/pcie_quirks.h    |  3 +-
+ 3 files changed, 74 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 41c71fbea9c1..ac749da17072 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -528,6 +528,13 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
+ 	mwifiex_shutdown_sw(adapter);
+ 	clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ 	clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
++
++	/* For Surface gen4+ devices, we need to put wifi into D3cold right
++	 * before performing FLR
++	 */
++	if (card->quirks & QUIRK_FW_RST_D3COLD)
++		mwifiex_pcie_reset_d3cold_quirk(pdev);
++
+ 	mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+ 
+ 	card->pci_reset_ongoing = true;
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index 929aee2b0a60..edc739c542fe 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -21,7 +21,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5",
+@@ -30,7 +30,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5 (LTE)",
+@@ -39,7 +39,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Pro 6",
+@@ -47,7 +47,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Book 1",
+@@ -55,7 +55,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Book 2",
+@@ -63,7 +63,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 1",
+@@ -71,7 +71,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 2",
+@@ -79,7 +79,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface 3",
+@@ -111,4 +111,61 @@ void mwifiex_initialize_quirks(struct pcie_service_card *card)
+ 
+ 	if (!card->quirks)
+ 		dev_info(&pdev->dev, "no quirks enabled\n");
++	if (card->quirks & QUIRK_FW_RST_D3COLD)
++		dev_info(&pdev->dev, "quirk reset_d3cold enabled\n");
++}
++
++static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
++{
++	dev_info(&pdev->dev, "putting into D3cold...\n");
++
++	pci_save_state(pdev);
++	if (pci_is_enabled(pdev))
++		pci_disable_device(pdev);
++	pci_set_power_state(pdev, PCI_D3cold);
++}
++
++static int mwifiex_pcie_set_power_d0(struct pci_dev *pdev)
++{
++	int ret;
++
++	dev_info(&pdev->dev, "putting into D0...\n");
++
++	pci_set_power_state(pdev, PCI_D0);
++	ret = pci_enable_device(pdev);
++	if (ret) {
++		dev_err(&pdev->dev, "pci_enable_device failed\n");
++		return ret;
++	}
++	pci_restore_state(pdev);
++
++	return 0;
++}
++
++int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev)
++{
++	struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
++	int ret;
++
++	/* Power-cycle (put into D3cold then D0) */
++	dev_info(&pdev->dev, "Using reset_d3cold quirk to perform FW reset\n");
++
++	/* We need to perform power-cycle also for bridge of wifi because
++	 * on some devices (e.g. Surface Book 1), the OS for some reasons
++	 * can't know the real power state of the bridge.
++	 * When tried to power-cycle only wifi, the reset failed with the
++	 * following dmesg log:
++	 * "Cannot transition to power state D0 for parent in D3hot".
++	 */
++	mwifiex_pcie_set_power_d3cold(pdev);
++	mwifiex_pcie_set_power_d3cold(parent_pdev);
++
++	ret = mwifiex_pcie_set_power_d0(parent_pdev);
++	if (ret)
++		return ret;
++	ret = mwifiex_pcie_set_power_d0(pdev);
++	if (ret)
++		return ret;
++
++	return 0;
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+index 5326ae7e5671..8b9dcb5070d8 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -6,6 +6,7 @@
+ #include "pcie.h"
+ 
+ /* quirks */
+-// quirk flags can be added here
++#define QUIRK_FW_RST_D3COLD	BIT(0)
+ 
+ void mwifiex_initialize_quirks(struct pcie_service_card *card);
++int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
+-- 
+2.30.1
+
+From 85d61cb36f4e9fb4891b5f9cfe099bb9075c742f Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Tue, 29 Sep 2020 17:32:22 +0900
+Subject: [PATCH] mwifiex: pcie: add reset_wsid quirk for Surface 3
+
+This commit adds reset_wsid quirk and uses this quirk for Surface 3 on
+card reset.
+
+To reset mwifiex on Surface 3, it seems that calling the _DSM method
+that exists in the \_SB.WSID [1] device is required.
+
+On Surface 3, calling the _DSM method removes and re-probes the card by
+itself. Therefore, the reset function needs to be placed before
+performing FLR, and any other reset-related work is skipped.
+
+Note that Surface Pro 3 also has the WSID device [2], but it seems to need
+more work. This commit only supports Surface 3 for now.
+
+[1] https://github.com/linux-surface/acpidumps/blob/05cba925f3a515f222acb5b3551a032ddde958fe/surface_3/dsdt.dsl#L11947-L12011
+[2] https://github.com/linux-surface/acpidumps/blob/05cba925f3a515f222acb5b3551a032ddde958fe/surface_pro_3/dsdt.dsl#L12164-L12216
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c   | 10 +++
+ .../wireless/marvell/mwifiex/pcie_quirks.c    | 77 ++++++++++++++++++-
+ .../wireless/marvell/mwifiex/pcie_quirks.h    |  5 ++
+ 3 files changed, 91 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index ac749da17072..bf9ef4ede3f1 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -2969,6 +2969,16 @@ static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
+ {
+ 	struct pcie_service_card *card = adapter->card;
+ 
++	/* On Surface 3, reset_wsid method removes then re-probes card by
++	 * itself. So, need to place it here and skip performing any other
++	 * reset-related works.
++	 */
++	if (card->quirks & QUIRK_FW_RST_WSID_S3) {
++		mwifiex_pcie_reset_wsid_quirk(card->dev);
++		/* skip performing any other reset-related works */
++		return;
++	}
++
+ 	/* We can't afford to wait here; remove() might be waiting on us. If we
+ 	 * can't grab the device lock, maybe we'll get another chance later.
+ 	 */
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index edc739c542fe..f0a6fa0a7ae5 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -9,10 +9,21 @@
+  * down, or causes NULL ptr deref).
+  */
+ 
++#include <linux/acpi.h>
+ #include <linux/dmi.h>
+ 
+ #include "pcie_quirks.h"
+ 
++/* For reset_wsid quirk */
++#define ACPI_WSID_PATH		"\\_SB.WSID"
++#define WSID_REV		0x0
++#define WSID_FUNC_WIFI_PWR_OFF	0x1
++#define WSID_FUNC_WIFI_PWR_ON	0x2
++/* WSID _DSM UUID: "534ea3bf-fcc2-4e7a-908f-a13978f0c7ef" */
++static const guid_t wsid_dsm_guid =
++	GUID_INIT(0x534ea3bf, 0xfcc2, 0x4e7a,
++		  0x90, 0x8f, 0xa1, 0x39, 0x78, 0xf0, 0xc7, 0xef);
++
+ /* quirk table based on DMI matching */
+ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 	{
+@@ -87,7 +98,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_WSID_S3,
+ 	},
+ 	{
+ 		.ident = "Surface Pro 3",
+@@ -113,6 +124,9 @@ void mwifiex_initialize_quirks(struct pcie_service_card *card)
+ 		dev_info(&pdev->dev, "no quirks enabled\n");
+ 	if (card->quirks & QUIRK_FW_RST_D3COLD)
+ 		dev_info(&pdev->dev, "quirk reset_d3cold enabled\n");
++	if (card->quirks & QUIRK_FW_RST_WSID_S3)
++		dev_info(&pdev->dev,
++			 "quirk reset_wsid for Surface 3 enabled\n");
+ }
+ 
+ static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
+@@ -169,3 +183,64 @@ int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev)
+ 
+ 	return 0;
+ }
++
++int mwifiex_pcie_reset_wsid_quirk(struct pci_dev *pdev)
++{
++	acpi_handle handle;
++	union acpi_object *obj;
++	acpi_status status;
++
++	dev_info(&pdev->dev, "Using reset_wsid quirk to perform FW reset\n");
++
++	status = acpi_get_handle(NULL, ACPI_WSID_PATH, &handle);
++	if (ACPI_FAILURE(status)) {
++		dev_err(&pdev->dev, "No ACPI handle for path %s\n",
++			ACPI_WSID_PATH);
++		return -ENODEV;
++	}
++
++	if (!acpi_has_method(handle, "_DSM")) {
++		dev_err(&pdev->dev, "_DSM method not found\n");
++		return -ENODEV;
++	}
++
++	if (!acpi_check_dsm(handle, &wsid_dsm_guid,
++			    WSID_REV, WSID_FUNC_WIFI_PWR_OFF)) {
++		dev_err(&pdev->dev,
++			"_DSM method doesn't support wifi power off func\n");
++		return -ENODEV;
++	}
++
++	if (!acpi_check_dsm(handle, &wsid_dsm_guid,
++			    WSID_REV, WSID_FUNC_WIFI_PWR_ON)) {
++		dev_err(&pdev->dev,
++			"_DSM method doesn't support wifi power on func\n");
++		return -ENODEV;
++	}
++
++	/* card will be removed immediately after this call on Surface 3 */
++	dev_info(&pdev->dev, "turning wifi off...\n");
++	obj = acpi_evaluate_dsm(handle, &wsid_dsm_guid,
++				WSID_REV, WSID_FUNC_WIFI_PWR_OFF,
++				NULL);
++	if (!obj) {
++		dev_err(&pdev->dev,
++			"device _DSM execution failed for turning wifi off\n");
++		return -EIO;
++	}
++	ACPI_FREE(obj);
++
++	/* card will be re-probed immediately after this call on Surface 3 */
++	dev_info(&pdev->dev, "turning wifi on...\n");
++	obj = acpi_evaluate_dsm(handle, &wsid_dsm_guid,
++				WSID_REV, WSID_FUNC_WIFI_PWR_ON,
++				NULL);
++	if (!obj) {
++		dev_err(&pdev->dev,
++			"device _DSM execution failed for turning wifi on\n");
++		return -EIO;
++	}
++	ACPI_FREE(obj);
++
++	return 0;
++}
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+index 8b9dcb5070d8..3ef7440418e3 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -7,6 +7,11 @@
+ 
+ /* quirks */
+ #define QUIRK_FW_RST_D3COLD	BIT(0)
++/* Surface 3 and Surface Pro 3 have the same _DSM method but need to
++ * be handled differently. Currently, only S3 is supported.
++ */
++#define QUIRK_FW_RST_WSID_S3	BIT(1)
+ 
+ void mwifiex_initialize_quirks(struct pcie_service_card *card);
+ int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
++int mwifiex_pcie_reset_wsid_quirk(struct pci_dev *pdev);
+-- 
+2.30.1
+
+From 5aacb771524eb91e98b7d36107936a82f7060103 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Wed, 30 Sep 2020 18:08:24 +0900
+Subject: [PATCH] mwifiex: pcie: (OEMB) add quirk for Surface 3 with broken DMI
+ table
+
+(made referring to http://git.osdn.net/view?p=android-x86/kernel.git;a=commitdiff;h=18e2e857c57633b25b3b4120f212224a108cd883)
+
+On some Surface 3 units, the DMI table gets corrupted for unknown reasons
+and breaks existing DMI matching used for device-specific quirks.
+
+This commit adds the (broken) DMI info for the affected Surface 3.
+
+On affected systems, DMI info will look like this:
+    $ grep . /sys/devices/virtual/dmi/id/{bios_vendor,board_name,board_vendor,\
+    chassis_vendor,product_name,sys_vendor}
+    /sys/devices/virtual/dmi/id/bios_vendor:American Megatrends Inc.
+    /sys/devices/virtual/dmi/id/board_name:OEMB
+    /sys/devices/virtual/dmi/id/board_vendor:OEMB
+    /sys/devices/virtual/dmi/id/chassis_vendor:OEMB
+    /sys/devices/virtual/dmi/id/product_name:OEMB
+    /sys/devices/virtual/dmi/id/sys_vendor:OEMB
+
+Expected:
+    $ grep . /sys/devices/virtual/dmi/id/{bios_vendor,board_name,board_vendor,\
+    chassis_vendor,product_name,sys_vendor}
+    /sys/devices/virtual/dmi/id/bios_vendor:American Megatrends Inc.
+    /sys/devices/virtual/dmi/id/board_name:Surface 3
+    /sys/devices/virtual/dmi/id/board_vendor:Microsoft Corporation
+    /sys/devices/virtual/dmi/id/chassis_vendor:Microsoft Corporation
+    /sys/devices/virtual/dmi/id/product_name:Surface 3
+    /sys/devices/virtual/dmi/id/sys_vendor:Microsoft Corporation
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie_quirks.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index f0a6fa0a7ae5..34dcd84f02a6 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -100,6 +100,15 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)QUIRK_FW_RST_WSID_S3,
+ 	},
++	{
++		.ident = "Surface 3",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++		.driver_data = (void *)QUIRK_FW_RST_WSID_S3,
++	},
+ 	{
+ 		.ident = "Surface Pro 3",
+ 		.matches = {
+-- 
+2.30.1
+
+From e929da72221262907056171eed43956850d4490c Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Thu, 24 Sep 2020 01:56:34 +0900
+Subject: [PATCH] mwifiex: pcie: use shutdown_sw()/reinit_sw() on
+ suspend()/resume()
+
+With the current Host Sleep method, there are issues with reaching S0ix
+and with AP scanning after suspend.
+
+When using the Host Sleep method, the platform is prevented from
+reaching S0ix during suspend. Also, after suspend, AP scanning sometimes
+won't work, resulting in non-working wifi.
+
+To fix such issues, perform shutdown_sw()/reinit_sw() instead of Host
+Sleep.
+
+As a side effect, this patch disables wakeups (meaning that Wake-On-WLAN
+can no longer be used, if it was working before), and might also reset
+some internal states.
+
+Note that suspend() no longer checks if it's already suspended.
+
+With the previous Host Sleep method, the check was done by looking at
+adapter->hs_activated in mwifiex_enable_hs() [sta_ioctl.c], but not
+MWIFIEX_IS_SUSPENDED. So, what the previous method checked was instead
+Host Sleep state, not suspend itself. Therefore, there is no need to check
+the suspend state now.
+
+The comment about the suspend state check at the top of suspend() is
+also removed accordingly.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c | 29 +++++++--------------
+ 1 file changed, 10 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index bf9ef4ede3f1..30a602a18f9d 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -294,8 +294,7 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
+  * registered functions must have drivers with suspend and resume
+  * methods. Failing that the kernel simply removes the whole card.
+  *
+- * If already not suspended, this function allocates and sends a host
+- * sleep activate request to the firmware and turns off the traffic.
++ * This function shuts down the adapter.
+  */
+ static int mwifiex_pcie_suspend(struct device *dev)
+ {
+@@ -303,31 +302,21 @@ static int mwifiex_pcie_suspend(struct device *dev)
+ 	struct pcie_service_card *card = dev_get_drvdata(dev);
+ 
+ 
+-	/* Might still be loading firmware */
+-	wait_for_completion(&card->fw_done);
+-
+ 	adapter = card->adapter;
+ 	if (!adapter) {
+ 		dev_err(dev, "adapter is not valid\n");
+ 		return 0;
+ 	}
+ 
+-	mwifiex_enable_wake(adapter);
+-
+-	/* Enable the Host Sleep */
+-	if (!mwifiex_enable_hs(adapter)) {
++	/* Shut down SW */
++	if (mwifiex_shutdown_sw(adapter)) {
+ 		mwifiex_dbg(adapter, ERROR,
+ 			    "cmd: failed to suspend\n");
+-		clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
+-		mwifiex_disable_wake(adapter);
+ 		return -EFAULT;
+ 	}
+ 
+-	flush_workqueue(adapter->workqueue);
+-
+ 	/* Indicate device suspended */
+ 	set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+-	clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
+ 
+ 	return 0;
+ }
+@@ -337,13 +326,13 @@ static int mwifiex_pcie_suspend(struct device *dev)
+  * registered functions must have drivers with suspend and resume
+  * methods. Failing that the kernel simply removes the whole card.
+  *
+- * If already not resumed, this function turns on the traffic and
+- * sends a host sleep cancel request to the firmware.
++ * If already not resumed, this function reinits the adapter.
+  */
+ static int mwifiex_pcie_resume(struct device *dev)
+ {
+ 	struct mwifiex_adapter *adapter;
+ 	struct pcie_service_card *card = dev_get_drvdata(dev);
++	int ret;
+ 
+ 
+ 	if (!card->adapter) {
+@@ -361,9 +350,11 @@ static int mwifiex_pcie_resume(struct device *dev)
+ 
+ 	clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ 
+-	mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
+-			  MWIFIEX_ASYNC_CMD);
+-	mwifiex_disable_wake(adapter);
++	ret = mwifiex_reinit_sw(adapter);
++	if (ret)
++		dev_err(dev, "reinit failed: %d\n", ret);
++	else
++		mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+ 
+ 	return 0;
+ }
+-- 
+2.30.1
+
+From 7a3800eb38818462b06617629d5d797f3be481c7 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Mon, 24 Aug 2020 17:11:35 +0900
+Subject: [PATCH] mwifiex: pcie: add enable_device_dump module parameter
+
+The device_dump may take quite a long time, and users may want to
+disable the dump for daily usage.
+
+This commit adds a new module parameter and disables device_dump by
+default.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 30a602a18f9d..113636355dec 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -183,6 +183,11 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+ 	.can_ext_scan = true,
+ };
+ 
++static bool enable_device_dump;
++module_param(enable_device_dump, bool, 0644);
++MODULE_PARM_DESC(enable_device_dump,
++		 "enable device_dump (default: disabled)");
++
+ static const struct of_device_id mwifiex_pcie_of_match_table[] = {
+ 	{ .compatible = "pci11ab,2b42" },
+ 	{ .compatible = "pci1b4b,2b42" },
+@@ -2943,6 +2948,12 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
+ 
+ static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
+ {
++	if (!enable_device_dump) {
++		mwifiex_dbg(adapter, MSG,
++			    "device_dump is disabled by module parameter\n");
++		return;
++	}
++
+ 	adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+ 	if (!adapter->devdump_data) {
+ 		mwifiex_dbg(adapter, ERROR,
+-- 
+2.30.1
+
+From 90a16cfa6779902faa82b5b458cd03bda6112c98 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 4 Oct 2020 00:11:49 +0900
+Subject: [PATCH] mwifiex: pcie: disable bridge_d3 for Surface gen4+
+
+Currently, the mwifiex fw will crash after suspend on recent kernel
+series. On Windows, it seems that the root port of the wifi device never
+enters the D3 state (it stays in D0). On Linux, disabling the D3 state
+for the bridge fixes the fw crashing after suspend.
+
+This commit disables the D3 state of the root port on driver
+initialization, which fixes the fw crashing after suspend.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c   |  7 +++++
+ .../wireless/marvell/mwifiex/pcie_quirks.c    | 27 +++++++++++++------
+ .../wireless/marvell/mwifiex/pcie_quirks.h    |  1 +
+ 3 files changed, 27 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 113636355dec..1a44ca3e66f3 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -375,6 +375,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
+ 					const struct pci_device_id *ent)
+ {
+ 	struct pcie_service_card *card;
++	struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
+ 	int ret;
+ 
+ 	pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
+@@ -416,6 +417,12 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
+ 		return -1;
+ 	}
+ 
++	/* disable bridge_d3 for Surface gen4+ devices to fix fw crashing
++	 * after suspend
++	 */
++	if (card->quirks & QUIRK_NO_BRIDGE_D3)
++		parent_pdev->bridge_d3 = false;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index 34dcd84f02a6..a2aeb2af907e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -32,7 +32,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5",
+@@ -41,7 +42,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5 (LTE)",
+@@ -50,7 +52,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 6",
+@@ -58,7 +61,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Book 1",
+@@ -66,7 +70,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Book 2",
+@@ -74,7 +79,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 1",
+@@ -82,7 +88,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 2",
+@@ -90,7 +97,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface 3",
+@@ -136,6 +144,9 @@ void mwifiex_initialize_quirks(struct pcie_service_card *card)
+ 	if (card->quirks & QUIRK_FW_RST_WSID_S3)
+ 		dev_info(&pdev->dev,
+ 			 "quirk reset_wsid for Surface 3 enabled\n");
++	if (card->quirks & QUIRK_NO_BRIDGE_D3)
++		dev_info(&pdev->dev,
++			 "quirk no_bridge_d3 enabled\n");
+ }
+ 
+ static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+index 3ef7440418e3..a95ebac06e13 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -11,6 +11,7 @@
+  * be handled differently. Currently, only S3 is supported.
+  */
+ #define QUIRK_FW_RST_WSID_S3	BIT(1)
++#define QUIRK_NO_BRIDGE_D3	BIT(2)
+ 
+ void mwifiex_initialize_quirks(struct pcie_service_card *card);
+ int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
+-- 
+2.30.1
+
+From 1b8dd96c2bd602eb6874fe3cff321a382fbd5533 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 4 Oct 2020 00:25:48 +0900
+Subject: [PATCH] mwifiex: add allow_ps_mode module parameter
+
+This commit adds the allow_ps_mode module parameter and sets it to false
+(disallowed) by default, to make ps_mode (power_save) control easier.
+
+On some setups (e.g., with a 5GHz AP), power_save makes the connection
+completely unstable, so we need to disable it. However, userspace tools
+may try to enable it. For this reason, we need to tell userspace that
+power_save is disallowed by default.
+
+When this parameter is set to false, changing the power_save mode will
+be disallowed like the following:
+
+    $ sudo iw dev mlan0 set power_save on
+    command failed: Operation not permitted (-1)
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/cfg80211.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index a6b9dc6700b1..943bc1e8ceae 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -25,6 +25,11 @@
+ static char *reg_alpha2;
+ module_param(reg_alpha2, charp, 0);
+ 
++static bool allow_ps_mode;
++module_param(allow_ps_mode, bool, 0644);
++MODULE_PARM_DESC(allow_ps_mode,
++		 "allow WiFi power management to be enabled. (default: disallowed)");
++
+ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
+ 	{
+ 		.max = MWIFIEX_MAX_BSS_NUM,
+@@ -435,6 +440,17 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ 
+ 	ps_mode = enabled;
+ 
++	/* Allow ps_mode to be enabled only when allow_ps_mode is true */
++	if (ps_mode && !allow_ps_mode) {
++		mwifiex_dbg(priv->adapter, MSG,
++			    "Enabling ps_mode disallowed by modparam\n");
++
++		/* Return -EPERM to inform userspace tools that setting
++		 * power_save to be enabled is not permitted.
++		 */
++		return -EPERM;
++	}
++
+ 	return mwifiex_drv_set_power(priv, &ps_mode);
+ }
+ 
+-- 
+2.30.1
+
+From ef208c596398830c5f7aaa232de061fe2b538dfd Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 4 Oct 2020 00:38:48 +0900
+Subject: [PATCH] mwifiex: print message when changing ps_mode
+
+Users may want to know when the ps_mode state changes (e.g., when
+diagnosing connection issues). This commit adds a log message when
+changing ps_mode.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/cfg80211.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index 943bc1e8ceae..a2eb8df8d385 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -451,6 +451,13 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ 		return -EPERM;
+ 	}
+ 
++	if (ps_mode)
++		mwifiex_dbg(priv->adapter, MSG,
++			    "Enabling ps_mode, disable if unstable.\n");
++	else
++		mwifiex_dbg(priv->adapter, MSG,
++			    "Disabling ps_mode.\n");
++
+ 	return mwifiex_drv_set_power(priv, &ps_mode);
+ }
+ 
+-- 
+2.30.1
+
+From 0e1fc8f028817f9682711d21be5c703a7ea6fddc Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 4 Oct 2020 00:59:37 +0900
+Subject: [PATCH] mwifiex: disable ps_mode explicitly by default instead
+
+At least on Surface devices, ps_mode makes the connection unstable,
+especially with 5GHz APs, and eventually causes the fw to crash.
+
+This commit disables ps_mode by default instead of enabling it.
+
+Required code is extracted from mwifiex_drv_set_power().
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+index d3a968ef21ef..9b7b52fbc9c4 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+@@ -2333,14 +2333,19 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
+ 			return -1;
+ 
+ 		if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+-			/* Enable IEEE PS by default */
+-			priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
++			/* Disable IEEE PS by default */
++			priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
+ 			ret = mwifiex_send_cmd(priv,
+ 					       HostCmd_CMD_802_11_PS_MODE_ENH,
+-					       EN_AUTO_PS, BITMAP_STA_PS, NULL,
++					       DIS_AUTO_PS, BITMAP_STA_PS, NULL,
+ 					       true);
+ 			if (ret)
+ 				return -1;
++			ret = mwifiex_send_cmd(priv,
++					       HostCmd_CMD_802_11_PS_MODE_ENH,
++					       GET_PS, 0, NULL, false);
++			if (ret)
++				return -1;
+ 		}
+ 
+ 		if (drcs) {
+-- 
+2.30.1
+
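
The module parameters introduced by this patchset can be set at module load time. A minimal sketch, assuming the parameters end up in the mwifiex_pcie and mwifiex modules as built by the Makefile above (the file name and the values are illustrative):

    # /etc/modprobe.d/mwifiex.conf
    # re-enable the firmware device dump, which this patchset disables by default
    options mwifiex_pcie enable_device_dump=1
    # allow userspace to turn WiFi power management back on
    options mwifiex allow_ps_mode=1

Since both parameters are registered with mode 0644, they can also be changed at runtime via /sys/module/<module>/parameters/. Whether a DMI quirk was picked up on a given device can be checked in the kernel log, as mwifiex_initialize_quirks() prints an info message for each enabled quirk.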

+ 1361 - 0
patches/5.11/0003-ipts.patch

@@ -0,0 +1,1361 @@
+From 886d61e91ee345da44d3c0a4ad655fe4fb6d8519 Mon Sep 17 00:00:00 2001
+From: Dorian Stoll <dorian.stoll@tmsp.io>
+Date: Thu, 30 Jul 2020 13:21:53 +0200
+Subject: [PATCH] misc: mei: Add missing IPTS device IDs
+
+Patchset: ipts
+---
+ drivers/misc/mei/hw-me-regs.h | 1 +
+ drivers/misc/mei/pci-me.c     | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 9cf8d8f60cfe..ca2d4faff6a2 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -92,6 +92,7 @@
+ #define MEI_DEV_ID_CDF        0x18D3  /* Cedar Fork */
+ 
+ #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
++#define MEI_DEV_ID_ICP_LP_3   0x34E4  /* Ice Lake Point LP 3 (iTouch) */
+ 
+ #define MEI_DEV_ID_JSP_N      0x4DE0  /* Jasper Lake Point N */
+ 
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 1de9ef7a272b..e12484840f88 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP_3, MEI_ME_PCH12_CFG)},
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
+-- 
+2.30.1
+
+From 92d426c6bb955e76e632652e4603e4fb392f4465 Mon Sep 17 00:00:00 2001
+From: Dorian Stoll <dorian.stoll@tmsp.io>
+Date: Thu, 6 Aug 2020 11:20:41 +0200
+Subject: [PATCH] misc: Add support for Intel Precise Touch & Stylus
+
+Based on linux-surface/intel-precise-touch@3f362c
+
+Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
+Patchset: ipts
+---
+ drivers/misc/Kconfig          |   1 +
+ drivers/misc/Makefile         |   1 +
+ drivers/misc/ipts/Kconfig     |  17 ++
+ drivers/misc/ipts/Makefile    |  12 ++
+ drivers/misc/ipts/context.h   |  47 ++++++
+ drivers/misc/ipts/control.c   |  76 +++++++++
+ drivers/misc/ipts/control.h   |  22 +++
+ drivers/misc/ipts/mei.c       | 118 ++++++++++++++
+ drivers/misc/ipts/protocol.h  | 284 ++++++++++++++++++++++++++++++++++
+ drivers/misc/ipts/receiver.c  | 188 ++++++++++++++++++++++
+ drivers/misc/ipts/receiver.h  |  16 ++
+ drivers/misc/ipts/resources.c | 128 +++++++++++++++
+ drivers/misc/ipts/resources.h |  17 ++
+ drivers/misc/ipts/uapi.c      | 211 +++++++++++++++++++++++++
+ drivers/misc/ipts/uapi.h      |  47 ++++++
+ 15 files changed, 1185 insertions(+)
+ create mode 100644 drivers/misc/ipts/Kconfig
+ create mode 100644 drivers/misc/ipts/Makefile
+ create mode 100644 drivers/misc/ipts/context.h
+ create mode 100644 drivers/misc/ipts/control.c
+ create mode 100644 drivers/misc/ipts/control.h
+ create mode 100644 drivers/misc/ipts/mei.c
+ create mode 100644 drivers/misc/ipts/protocol.h
+ create mode 100644 drivers/misc/ipts/receiver.c
+ create mode 100644 drivers/misc/ipts/receiver.h
+ create mode 100644 drivers/misc/ipts/resources.c
+ create mode 100644 drivers/misc/ipts/resources.h
+ create mode 100644 drivers/misc/ipts/uapi.c
+ create mode 100644 drivers/misc/ipts/uapi.h
+
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index fafa8b0d8099..c795c56e8d42 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -481,4 +481,5 @@ source "drivers/misc/ocxl/Kconfig"
+ source "drivers/misc/cardreader/Kconfig"
+ source "drivers/misc/habanalabs/Kconfig"
+ source "drivers/misc/uacce/Kconfig"
++source "drivers/misc/ipts/Kconfig"
+ endmenu
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index d23231e73330..9e6e3e2f2ea9 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -57,3 +57,4 @@ obj-$(CONFIG_HABANA_AI)		+= habanalabs/
+ obj-$(CONFIG_UACCE)		+= uacce/
+ obj-$(CONFIG_XILINX_SDFEC)	+= xilinx_sdfec.o
+ obj-$(CONFIG_HISI_HIKEY_USB)	+= hisi_hikey_usb.o
++obj-$(CONFIG_MISC_IPTS)		+= ipts/
+diff --git a/drivers/misc/ipts/Kconfig b/drivers/misc/ipts/Kconfig
+new file mode 100644
+index 000000000000..83e2a930c396
+--- /dev/null
++++ b/drivers/misc/ipts/Kconfig
+@@ -0,0 +1,17 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++
++config MISC_IPTS
++	tristate "Intel Precise Touch & Stylus"
++	depends on INTEL_MEI
++	help
++	  Say Y here if your system has a touchscreen using Intel's
++	  Precise Touch & Stylus (IPTS) technology.
++
++	  If unsure say N.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called ipts.
++
++	  Building this driver alone will not give you a working touchscreen.
++	  It only exposes a userspace API that can be used by a daemon to
++	  receive and process data from the touchscreen hardware.
+diff --git a/drivers/misc/ipts/Makefile b/drivers/misc/ipts/Makefile
+new file mode 100644
+index 000000000000..8f58b9adbc94
+--- /dev/null
++++ b/drivers/misc/ipts/Makefile
+@@ -0,0 +1,12 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++#
++# Makefile for the IPTS touchscreen driver
++#
++
++obj-$(CONFIG_MISC_IPTS) += ipts.o
++ipts-objs := control.o
++ipts-objs += mei.o
++ipts-objs += receiver.o
++ipts-objs += resources.o
++ipts-objs += uapi.o
++
+diff --git a/drivers/misc/ipts/context.h b/drivers/misc/ipts/context.h
+new file mode 100644
+index 000000000000..f4b06a2d3f72
+--- /dev/null
++++ b/drivers/misc/ipts/context.h
+@@ -0,0 +1,47 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_CONTEXT_H_
++#define _IPTS_CONTEXT_H_
++
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/mei_cl_bus.h>
++#include <linux/types.h>
++
++#include "protocol.h"
++
++enum ipts_host_status {
++	IPTS_HOST_STATUS_STARTING,
++	IPTS_HOST_STATUS_STARTED,
++	IPTS_HOST_STATUS_STOPPING,
++	IPTS_HOST_STATUS_STOPPED,
++};
++
++struct ipts_buffer_info {
++	u8 *address;
++	dma_addr_t dma_address;
++};
++
++struct ipts_context {
++	struct mei_cl_device *cldev;
++	struct device *dev;
++
++	bool restart;
++	enum ipts_host_status status;
++	struct ipts_get_device_info_rsp device_info;
++
++	struct ipts_buffer_info data[IPTS_BUFFERS];
++	struct ipts_buffer_info doorbell;
++
++	struct ipts_buffer_info feedback[IPTS_BUFFERS];
++	struct ipts_buffer_info workqueue;
++	struct ipts_buffer_info host2me;
++};
++
++#endif /* _IPTS_CONTEXT_H_ */
+diff --git a/drivers/misc/ipts/control.c b/drivers/misc/ipts/control.c
+new file mode 100644
+index 000000000000..24d3d19a667a
+--- /dev/null
++++ b/drivers/misc/ipts/control.c
+@@ -0,0 +1,76 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/mei_cl_bus.h>
++
++#include "context.h"
++#include "protocol.h"
++#include "resources.h"
++#include "uapi.h"
++
++int ipts_control_send(struct ipts_context *ipts, u32 code, void *payload,
++		      size_t size)
++{
++	int ret;
++	struct ipts_command cmd;
++
++	memset(&cmd, 0, sizeof(struct ipts_command));
++	cmd.code = code;
++
++	if (payload && size > 0)
++		memcpy(&cmd.payload, payload, size);
++
++	ret = mei_cldev_send(ipts->cldev, (u8 *)&cmd, sizeof(cmd.code) + size);
++	if (ret >= 0 || ret == -EINTR)
++		return 0;
++
++	dev_err(ipts->dev, "Error while sending: 0x%X:%d\n", code, ret);
++	return ret;
++}
++
++int ipts_control_start(struct ipts_context *ipts)
++{
++	if (ipts->status != IPTS_HOST_STATUS_STOPPED)
++		return -EBUSY;
++
++	dev_info(ipts->dev, "Starting IPTS\n");
++	ipts->status = IPTS_HOST_STATUS_STARTING;
++	ipts->restart = false;
++
++	ipts_uapi_link(ipts);
++	return ipts_control_send(ipts, IPTS_CMD_GET_DEVICE_INFO, NULL, 0);
++}
++
++int ipts_control_stop(struct ipts_context *ipts)
++{
++	if (ipts->status == IPTS_HOST_STATUS_STOPPING)
++		return -EBUSY;
++
++	if (ipts->status == IPTS_HOST_STATUS_STOPPED)
++		return -EBUSY;
++
++	dev_info(ipts->dev, "Stopping IPTS\n");
++	ipts->status = IPTS_HOST_STATUS_STOPPING;
++
++	ipts_uapi_unlink();
++	ipts_resources_free(ipts);
++
++	if (!mei_cldev_enabled(ipts->cldev))
++		return 0;
++
++	return ipts_control_send(ipts, IPTS_CMD_CLEAR_MEM_WINDOW, NULL, 0);
++}
++
++int ipts_control_restart(struct ipts_context *ipts)
++{
++	if (ipts->restart)
++		return -EBUSY;
++
++	ipts->restart = true;
++	return ipts_control_stop(ipts);
++}
+diff --git a/drivers/misc/ipts/control.h b/drivers/misc/ipts/control.h
+new file mode 100644
+index 000000000000..4ee0ceb84749
+--- /dev/null
++++ b/drivers/misc/ipts/control.h
+@@ -0,0 +1,22 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_CONTROL_H_
++#define _IPTS_CONTROL_H_
++
++#include <linux/types.h>
++
++#include "context.h"
++
++int ipts_control_send(struct ipts_context *ipts, u32 cmd, void *payload,
++		      size_t size);
++int ipts_control_start(struct ipts_context *ipts);
++int ipts_control_restart(struct ipts_context *ipts);
++int ipts_control_stop(struct ipts_context *ipts);
++
++#endif /* _IPTS_CONTROL_H_ */
+diff --git a/drivers/misc/ipts/mei.c b/drivers/misc/ipts/mei.c
+new file mode 100644
+index 000000000000..2945809d5b46
+--- /dev/null
++++ b/drivers/misc/ipts/mei.c
+@@ -0,0 +1,118 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/mei_cl_bus.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/slab.h>
++
++#include "context.h"
++#include "control.h"
++#include "protocol.h"
++#include "receiver.h"
++#include "uapi.h"
++
++static int ipts_mei_set_dma_mask(struct mei_cl_device *cldev)
++{
++	int ret;
++
++	ret = dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(64));
++	if (!ret)
++		return 0;
++
++	return dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(32));
++}
++
++static int ipts_mei_probe(struct mei_cl_device *cldev,
++			  const struct mei_cl_device_id *id)
++{
++	int ret;
++	struct ipts_context *ipts;
++
++	if (ipts_mei_set_dma_mask(cldev)) {
++		dev_err(&cldev->dev, "Failed to set DMA mask for IPTS\n");
++		return -EFAULT;
++	}
++
++	ret = mei_cldev_enable(cldev);
++	if (ret) {
++		dev_err(&cldev->dev, "Failed to enable MEI device: %d\n", ret);
++		return ret;
++	}
++
++	ipts = kzalloc(sizeof(*ipts), GFP_KERNEL);
++	if (!ipts) {
++		mei_cldev_disable(cldev);
++		return -ENOMEM;
++	}
++
++	ipts->cldev = cldev;
++	ipts->dev = &cldev->dev;
++	ipts->status = IPTS_HOST_STATUS_STOPPED;
++
++	mei_cldev_set_drvdata(cldev, ipts);
++	mei_cldev_register_rx_cb(cldev, ipts_receiver_callback);
++
++	return ipts_control_start(ipts);
++}
++
++static int ipts_mei_remove(struct mei_cl_device *cldev)
++{
++	struct ipts_context *ipts = mei_cldev_get_drvdata(cldev);
++
++	mei_cldev_disable(cldev);
++	ipts_control_stop(ipts);
++	kfree(ipts);
++
++	return 0;
++}
++
++static struct mei_cl_device_id ipts_mei_device_id_table[] = {
++	{ "", IPTS_MEI_UUID, MEI_CL_VERSION_ANY },
++	{},
++};
++MODULE_DEVICE_TABLE(mei, ipts_mei_device_id_table);
++
++static struct mei_cl_driver ipts_mei_driver = {
++	.id_table = ipts_mei_device_id_table,
++	.name = "ipts",
++	.probe = ipts_mei_probe,
++	.remove = ipts_mei_remove,
++};
++
++static int __init ipts_mei_init(void)
++{
++	int ret;
++
++	ret = ipts_uapi_init();
++	if (ret)
++		return ret;
++
++	ret = mei_cldev_driver_register(&ipts_mei_driver);
++	if (ret) {
++		ipts_uapi_free();
++		return ret;
++	}
++
++	return 0;
++}
++
++static void __exit ipts_mei_exit(void)
++{
++	mei_cldev_driver_unregister(&ipts_mei_driver);
++	ipts_uapi_free();
++}
++
++MODULE_DESCRIPTION("IPTS touchscreen driver");
++MODULE_AUTHOR("Dorian Stoll <dorian.stoll@tmsp.io>");
++MODULE_LICENSE("GPL");
++
++module_init(ipts_mei_init);
++module_exit(ipts_mei_exit);
+diff --git a/drivers/misc/ipts/protocol.h b/drivers/misc/ipts/protocol.h
+new file mode 100644
+index 000000000000..381068504d46
+--- /dev/null
++++ b/drivers/misc/ipts/protocol.h
+@@ -0,0 +1,284 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_PROTOCOL_H_
++#define _IPTS_PROTOCOL_H_
++
++#include <linux/types.h>
++
++/*
++ * The MEI client ID for IPTS functionality.
++ */
++#define IPTS_MEI_UUID                                                          \
++	UUID_LE(0x3e8d0870, 0x271a, 0x4208, 0x8e, 0xb5, 0x9a, 0xcb, 0x94,      \
++		0x02, 0xae, 0x04)
++
++/*
++ * Queries the device for vendor specific information.
++ *
++ * The command must not contain any payload.
++ * The response will contain struct ipts_get_device_info_rsp as payload.
++ */
++#define IPTS_CMD_GET_DEVICE_INFO 0x00000001
++#define IPTS_RSP_GET_DEVICE_INFO 0x80000001
++
++/*
++ * Sets the mode that IPTS will operate in.
++ *
++ * The command must contain struct ipts_set_mode_cmd as payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_SET_MODE 0x00000002
++#define IPTS_RSP_SET_MODE 0x80000002
++
++/*
++ * Configures the memory buffers that the ME will use
++ * for passing data to the host.
++ *
++ * The command must contain struct ipts_set_mem_window_cmd as payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_SET_MEM_WINDOW 0x00000003
++#define IPTS_RSP_SET_MEM_WINDOW 0x80000003
++
++/*
++ * Signals to the ME that the host is ready to receive data.
++ *
++ * The command must not contain any payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_READY_FOR_DATA 0x00000005
++#define IPTS_RSP_READY_FOR_DATA 0x80000005
++
++/*
++ * Signals to the ME that a buffer can be refilled.
++ *
++ * The command must contain struct ipts_feedback_cmd as payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_FEEDBACK 0x00000006
++#define IPTS_RSP_FEEDBACK 0x80000006
++
++/*
++ * Resets the data flow from the ME to the host and
++ * clears the buffers that were set with SET_MEM_WINDOW.
++ *
++ * The command must not contain any payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_CLEAR_MEM_WINDOW 0x00000007
++#define IPTS_RSP_CLEAR_MEM_WINDOW 0x80000007
++
++/*
++ * Instructs the ME to reset the touch sensor.
++ *
++ * The command must contain struct ipts_reset_sensor_cmd as payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_RESET_SENSOR 0x0000000B
++#define IPTS_RSP_RESET_SENSOR 0x8000000B
++
++/**
++ * enum ipts_status - Possible status codes returned by IPTS commands.
++ * @IPTS_STATUS_SUCCESS:                 Operation completed successfully.
++ * @IPTS_STATUS_INVALID_PARAMS:          Command contained a payload with invalid parameters.
++ * @IPTS_STATUS_ACCESS_DENIED:           ME could not validate buffer addresses supplied by host.
++ * @IPTS_STATUS_CMD_SIZE_ERROR:          Command contains an invalid payload.
++ * @IPTS_STATUS_NOT_READY:               Buffer addresses have not been set.
++ * @IPTS_STATUS_REQUEST_OUTSTANDING:     There is an outstanding command of the same type.
++ *                                       The host must wait for a response before sending another
++ *                                       command of the same type.
++ * @IPTS_STATUS_NO_SENSOR_FOUND:         No sensor could be found. Either no sensor is connected, it
++ *                                       has not been initialized yet, or the system is improperly
++ *                                       configured.
++ * @IPTS_STATUS_OUT_OF_MEMORY:           Not enough free memory for requested operation.
++ * @IPTS_STATUS_INTERNAL_ERROR:          An unexpected error occurred.
++ * @IPTS_STATUS_SENSOR_DISABLED:         The sensor has been disabled and must be reinitialized.
++ * @IPTS_STATUS_COMPAT_CHECK_FAIL:       Compatibility revision check between sensor and ME failed.
++ *                                       The host can ignore this error and attempt to continue.
++ * @IPTS_STATUS_SENSOR_EXPECTED_RESET:   The sensor went through a reset initiated by ME or host.
++ * @IPTS_STATUS_SENSOR_UNEXPECTED_RESET: The sensor went through an unexpected reset.
++ * @IPTS_STATUS_RESET_FAILED:            Requested sensor reset failed to complete.
++ * @IPTS_STATUS_TIMEOUT:                 The operation timed out.
++ * @IPTS_STATUS_TEST_MODE_FAIL:          Test mode pattern did not match expected values.
++ * @IPTS_STATUS_SENSOR_FAIL_FATAL:       The sensor reported a fatal error during reset sequence.
++ *                                       Further progress is not possible.
++ * @IPTS_STATUS_SENSOR_FAIL_NONFATAL:    The sensor reported a non-fatal error during reset sequence.
++ *                                       The host can attempt to continue.
++ * @IPTS_STATUS_INVALID_DEVICE_CAPS:     The device reported invalid capabilities.
++ * @IPTS_STATUS_QUIESCE_IO_IN_PROGRESS:  Command cannot be completed until Quiesce IO is done.
++ */
++enum ipts_status {
++	IPTS_STATUS_SUCCESS = 0,
++	IPTS_STATUS_INVALID_PARAMS = 1,
++	IPTS_STATUS_ACCESS_DENIED = 2,
++	IPTS_STATUS_CMD_SIZE_ERROR = 3,
++	IPTS_STATUS_NOT_READY = 4,
++	IPTS_STATUS_REQUEST_OUTSTANDING = 5,
++	IPTS_STATUS_NO_SENSOR_FOUND = 6,
++	IPTS_STATUS_OUT_OF_MEMORY = 7,
++	IPTS_STATUS_INTERNAL_ERROR = 8,
++	IPTS_STATUS_SENSOR_DISABLED = 9,
++	IPTS_STATUS_COMPAT_CHECK_FAIL = 10,
++	IPTS_STATUS_SENSOR_EXPECTED_RESET = 11,
++	IPTS_STATUS_SENSOR_UNEXPECTED_RESET = 12,
++	IPTS_STATUS_RESET_FAILED = 13,
++	IPTS_STATUS_TIMEOUT = 14,
++	IPTS_STATUS_TEST_MODE_FAIL = 15,
++	IPTS_STATUS_SENSOR_FAIL_FATAL = 16,
++	IPTS_STATUS_SENSOR_FAIL_NONFATAL = 17,
++	IPTS_STATUS_INVALID_DEVICE_CAPS = 18,
++	IPTS_STATUS_QUIESCE_IO_IN_PROGRESS = 19,
++};
++
++/*
++ * The number of buffers used for IPTS
++ */
++#define IPTS_BUFFERS 16
++
++/**
++ * enum ipts_mode - Operation mode for IPTS hardware
++ * @IPTS_MODE_SINGLETOUCH: Fallback that supports only one finger and no stylus.
++ *                         The data is received as a HID report with ID 64.
++ * @IPTS_MODE_MULTITOUCH:  The "proper" operation mode for IPTS. It will return
++ *                         stylus data as well as capacitive heatmap touch data.
++ *                         This data needs to be processed in userspace.
++ */
++enum ipts_mode {
++	IPTS_MODE_SINGLETOUCH = 0,
++	IPTS_MODE_MULTITOUCH = 1,
++};
++
++/**
++ * struct ipts_set_mode_cmd - Payload for the SET_MODE command.
++ * @mode: The mode that IPTS should operate in.
++ */
++struct ipts_set_mode_cmd {
++	enum ipts_mode mode;
++	u8 reserved[12];
++} __packed;
++
++#define IPTS_WORKQUEUE_SIZE	 8192
++#define IPTS_WORKQUEUE_ITEM_SIZE 16
++
++/**
++ * struct ipts_set_mem_window_cmd - Payload for the SET_MEM_WINDOW command.
++ * @data_buffer_addr_lower:     Lower 32 bits of the data buffer addresses.
++ * @data_buffer_addr_upper:     Upper 32 bits of the data buffer addresses.
++ * @workqueue_addr_lower:       Lower 32 bits of the workqueue buffer address.
++ * @workqueue_addr_upper:       Upper 32 bits of the workqueue buffer address.
++ * @doorbell_addr_lower:        Lower 32 bits of the doorbell buffer address.
++ * @doorbell_addr_upper:        Upper 32 bits of the doorbell buffer address.
++ * @feedback_buffer_addr_lower: Lower 32 bits of the feedback buffer addresses.
++ * @feedback_buffer_addr_upper: Upper 32 bits of the feedback buffer addresses.
++ * @host2me_addr_lower:         Lower 32 bits of the host2me buffer address.
++ * @host2me_addr_upper:         Upper 32 bits of the host2me buffer address.
++ * @workqueue_item_size:        Magic value. (IPTS_WORKQUEUE_ITEM_SIZE)
++ * @workqueue_size:             Magic value. (IPTS_WORKQUEUE_SIZE)
++ *
++ * The data buffers are buffers that get filled with touch data by the ME.
++ * The doorbell buffer is a u32 that gets incremented by the ME once a data
++ * buffer has been filled with new data.
++ *
++ * The other buffers are required for using GuC submission with binary
++ * firmware. Since support for GuC submission has been dropped from i915,
++ * they are not used anymore, but they need to be allocated and passed,
++ * otherwise the hardware will refuse to start.
++ */
++struct ipts_set_mem_window_cmd {
++	u32 data_buffer_addr_lower[IPTS_BUFFERS];
++	u32 data_buffer_addr_upper[IPTS_BUFFERS];
++	u32 workqueue_addr_lower;
++	u32 workqueue_addr_upper;
++	u32 doorbell_addr_lower;
++	u32 doorbell_addr_upper;
++	u32 feedback_buffer_addr_lower[IPTS_BUFFERS];
++	u32 feedback_buffer_addr_upper[IPTS_BUFFERS];
++	u32 host2me_addr_lower;
++	u32 host2me_addr_upper;
++	u32 host2me_size;
++	u8 reserved1;
++	u8 workqueue_item_size;
++	u16 workqueue_size;
++	u8 reserved[32];
++} __packed;
++
++/**
++ * struct ipts_feedback_cmd - Payload for the FEEDBACK command.
++ * @buffer: The buffer that the ME should refill.
++ */
++struct ipts_feedback_cmd {
++	u32 buffer;
++	u8 reserved[12];
++} __packed;
++
++/**
++ * enum ipts_reset_type - Possible ways of resetting the touch sensor
++ * @IPTS_RESET_TYPE_HARD: Perform hardware reset using GPIO pin.
++ * @IPTS_RESET_TYPE_SOFT: Perform software reset using SPI interface.
++ */
++enum ipts_reset_type {
++	IPTS_RESET_TYPE_HARD = 0,
++	IPTS_RESET_TYPE_SOFT = 1,
++};
++
++/**
++ * struct ipts_reset_sensor_cmd - Payload for the RESET_SENSOR command.
++ * @type: What type of reset should be performed.
++ */
++struct ipts_reset_sensor_cmd {
++	enum ipts_reset_type type;
++	u8 reserved[4];
++} __packed;
++
++/**
++ * struct ipts_command - A message sent from the host to the ME.
++ * @code:    The message code describing the command. (see IPTS_CMD_*)
++ * @payload: Payload for the command, or 0 if no payload is required.
++ */
++struct ipts_command {
++	u32 code;
++	u8 payload[320];
++} __packed;
++
++/**
++ * struct ipts_get_device_info_rsp - Payload for the GET_DEVICE_INFO response.
++ * @vendor_id:     Vendor ID of the touch sensor.
++ * @device_id:     Device ID of the touch sensor.
++ * @hw_rev:        Hardware revision of the touch sensor.
++ * @fw_rev:        Firmware revision of the touch sensor.
++ * @data_size:     Required size of one data buffer.
++ * @feedback_size: Required size of one feedback buffer.
++ * @mode:          Current operation mode of IPTS.
++ * @max_contacts:  The number of concurrent touches supported by the sensor.
++ */
++struct ipts_get_device_info_rsp {
++	u16 vendor_id;
++	u16 device_id;
++	u32 hw_rev;
++	u32 fw_rev;
++	u32 data_size;
++	u32 feedback_size;
++	enum ipts_mode mode;
++	u8 max_contacts;
++	u8 reserved[19];
++} __packed;
++
++/**
++ * struct ipts_response - A message sent from the ME to the host.
++ * @code:    The message code describing the response. (see IPTS_RSP_*)
++ * @status:  The status code returned by the command.
++ * @payload: Payload returned by the command.
++ */
++struct ipts_response {
++	u32 code;
++	enum ipts_status status;
++	u8 payload[80];
++} __packed;
++
++#endif /* _IPTS_PROTOCOL_H_ */
+diff --git a/drivers/misc/ipts/receiver.c b/drivers/misc/ipts/receiver.c
+new file mode 100644
+index 000000000000..916ba3ec211b
+--- /dev/null
++++ b/drivers/misc/ipts/receiver.c
+@@ -0,0 +1,188 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/mei_cl_bus.h>
++#include <linux/types.h>
++
++#include "context.h"
++#include "control.h"
++#include "protocol.h"
++#include "resources.h"
++
++static int ipts_receiver_handle_get_device_info(struct ipts_context *ipts,
++						struct ipts_response *rsp)
++{
++	struct ipts_set_mode_cmd cmd;
++
++	memcpy(&ipts->device_info, rsp->payload,
++	       sizeof(struct ipts_get_device_info_rsp));
++
++	memset(&cmd, 0, sizeof(struct ipts_set_mode_cmd));
++	cmd.mode = IPTS_MODE_MULTITOUCH;
++
++	return ipts_control_send(ipts, IPTS_CMD_SET_MODE, &cmd,
++				 sizeof(struct ipts_set_mode_cmd));
++}
++
++static int ipts_receiver_handle_set_mode(struct ipts_context *ipts)
++{
++	int i, ret;
++	struct ipts_set_mem_window_cmd cmd;
++
++	ret = ipts_resources_alloc(ipts);
++	if (ret) {
++		dev_err(ipts->dev, "Failed to allocate resources\n");
++		return ret;
++	}
++
++	memset(&cmd, 0, sizeof(struct ipts_set_mem_window_cmd));
++
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		cmd.data_buffer_addr_lower[i] =
++			lower_32_bits(ipts->data[i].dma_address);
++
++		cmd.data_buffer_addr_upper[i] =
++			upper_32_bits(ipts->data[i].dma_address);
++
++		cmd.feedback_buffer_addr_lower[i] =
++			lower_32_bits(ipts->feedback[i].dma_address);
++
++		cmd.feedback_buffer_addr_upper[i] =
++			upper_32_bits(ipts->feedback[i].dma_address);
++	}
++
++	cmd.workqueue_addr_lower = lower_32_bits(ipts->workqueue.dma_address);
++	cmd.workqueue_addr_upper = upper_32_bits(ipts->workqueue.dma_address);
++
++	cmd.doorbell_addr_lower = lower_32_bits(ipts->doorbell.dma_address);
++	cmd.doorbell_addr_upper = upper_32_bits(ipts->doorbell.dma_address);
++
++	cmd.host2me_addr_lower = lower_32_bits(ipts->host2me.dma_address);
++	cmd.host2me_addr_upper = upper_32_bits(ipts->host2me.dma_address);
++
++	cmd.workqueue_size = IPTS_WORKQUEUE_SIZE;
++	cmd.workqueue_item_size = IPTS_WORKQUEUE_ITEM_SIZE;
++
++	return ipts_control_send(ipts, IPTS_CMD_SET_MEM_WINDOW, &cmd,
++				 sizeof(struct ipts_set_mem_window_cmd));
++}
++
++static int ipts_receiver_handle_set_mem_window(struct ipts_context *ipts)
++{
++	dev_info(ipts->dev, "Device %04hX:%04hX ready\n",
++		 ipts->device_info.vendor_id, ipts->device_info.device_id);
++	ipts->status = IPTS_HOST_STATUS_STARTED;
++
++	return ipts_control_send(ipts, IPTS_CMD_READY_FOR_DATA, NULL, 0);
++}
++
++static int ipts_receiver_handle_clear_mem_window(struct ipts_context *ipts)
++{
++	ipts->status = IPTS_HOST_STATUS_STOPPED;
++
++	if (ipts->restart)
++		return ipts_control_start(ipts);
++
++	return 0;
++}
++
++static bool ipts_receiver_sensor_was_reset(u32 status)
++{
++	return status == IPTS_STATUS_SENSOR_EXPECTED_RESET ||
++	       status == IPTS_STATUS_SENSOR_UNEXPECTED_RESET;
++}
++
++static bool ipts_receiver_handle_error(struct ipts_context *ipts,
++				       struct ipts_response *rsp)
++{
++	bool error;
++
++	switch (rsp->status) {
++	case IPTS_STATUS_SUCCESS:
++	case IPTS_STATUS_COMPAT_CHECK_FAIL:
++		error = false;
++		break;
++	case IPTS_STATUS_INVALID_PARAMS:
++		error = rsp->code != IPTS_RSP_FEEDBACK;
++		break;
++	case IPTS_STATUS_SENSOR_DISABLED:
++		error = ipts->status != IPTS_HOST_STATUS_STOPPING;
++		break;
++	default:
++		error = true;
++		break;
++	}
++
++	if (!error)
++		return false;
++
++	dev_err(ipts->dev, "Command 0x%08x failed: %d\n", rsp->code,
++		rsp->status);
++
++	if (ipts_receiver_sensor_was_reset(rsp->status)) {
++		dev_err(ipts->dev, "Sensor was reset\n");
++
++		if (ipts_control_restart(ipts))
++			dev_err(ipts->dev, "Failed to restart IPTS\n");
++	}
++
++	return true;
++}
++
++static void ipts_receiver_handle_response(struct ipts_context *ipts,
++					  struct ipts_response *rsp)
++{
++	int ret;
++
++	if (ipts_receiver_handle_error(ipts, rsp))
++		return;
++
++	switch (rsp->code) {
++	case IPTS_RSP_GET_DEVICE_INFO:
++		ret = ipts_receiver_handle_get_device_info(ipts, rsp);
++		break;
++	case IPTS_RSP_SET_MODE:
++		ret = ipts_receiver_handle_set_mode(ipts);
++		break;
++	case IPTS_RSP_SET_MEM_WINDOW:
++		ret = ipts_receiver_handle_set_mem_window(ipts);
++		break;
++	case IPTS_RSP_CLEAR_MEM_WINDOW:
++		ret = ipts_receiver_handle_clear_mem_window(ipts);
++		break;
++	default:
++		ret = 0;
++		break;
++	}
++
++	if (!ret)
++		return;
++
++	dev_err(ipts->dev, "Error while handling response 0x%08x: %d\n",
++		rsp->code, ret);
++
++	if (ipts_control_stop(ipts))
++		dev_err(ipts->dev, "Failed to stop IPTS\n");
++}
++
++void ipts_receiver_callback(struct mei_cl_device *cldev)
++{
++	int ret;
++	struct ipts_response rsp;
++	struct ipts_context *ipts;
++
++	ipts = mei_cldev_get_drvdata(cldev);
++
++	ret = mei_cldev_recv(cldev, (u8 *)&rsp, sizeof(struct ipts_response));
++	if (ret <= 0) {
++		dev_err(ipts->dev, "Error while reading response: %d\n", ret);
++		return;
++	}
++
++	ipts_receiver_handle_response(ipts, &rsp);
++}
+diff --git a/drivers/misc/ipts/receiver.h b/drivers/misc/ipts/receiver.h
+new file mode 100644
+index 000000000000..7f075afa7ef8
+--- /dev/null
++++ b/drivers/misc/ipts/receiver.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_RECEIVER_H_
++#define _IPTS_RECEIVER_H_
++
++#include <linux/mei_cl_bus.h>
++
++void ipts_receiver_callback(struct mei_cl_device *cldev);
++
++#endif /* _IPTS_RECEIVER_H_ */
+diff --git a/drivers/misc/ipts/resources.c b/drivers/misc/ipts/resources.c
+new file mode 100644
+index 000000000000..8e3a2409e438
+--- /dev/null
++++ b/drivers/misc/ipts/resources.c
+@@ -0,0 +1,128 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/dma-mapping.h>
++
++#include "context.h"
++
++void ipts_resources_free(struct ipts_context *ipts)
++{
++	int i;
++	struct ipts_buffer_info *buffers;
++
++	u32 data_buffer_size = ipts->device_info.data_size;
++	u32 feedback_buffer_size = ipts->device_info.feedback_size;
++
++	buffers = ipts->data;
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		if (!buffers[i].address)
++			continue;
++
++		dma_free_coherent(ipts->dev, data_buffer_size,
++				  buffers[i].address, buffers[i].dma_address);
++
++		buffers[i].address = NULL;
++		buffers[i].dma_address = 0;
++	}
++
++	buffers = ipts->feedback;
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		if (!buffers[i].address)
++			continue;
++
++		dma_free_coherent(ipts->dev, feedback_buffer_size,
++				  buffers[i].address, buffers[i].dma_address);
++
++		buffers[i].address = NULL;
++		buffers[i].dma_address = 0;
++	}
++
++	if (ipts->doorbell.address) {
++		dma_free_coherent(ipts->dev, sizeof(u32),
++				  ipts->doorbell.address,
++				  ipts->doorbell.dma_address);
++
++		ipts->doorbell.address = NULL;
++		ipts->doorbell.dma_address = 0;
++	}
++
++	if (ipts->workqueue.address) {
++		dma_free_coherent(ipts->dev, sizeof(u32),
++				  ipts->workqueue.address,
++				  ipts->workqueue.dma_address);
++
++		ipts->workqueue.address = NULL;
++		ipts->workqueue.dma_address = 0;
++	}
++
++	if (ipts->host2me.address) {
++		dma_free_coherent(ipts->dev, feedback_buffer_size,
++				  ipts->host2me.address,
++				  ipts->host2me.dma_address);
++
++		ipts->host2me.address = NULL;
++		ipts->host2me.dma_address = 0;
++	}
++}
++
++int ipts_resources_alloc(struct ipts_context *ipts)
++{
++	int i;
++	struct ipts_buffer_info *buffers;
++
++	u32 data_buffer_size = ipts->device_info.data_size;
++	u32 feedback_buffer_size = ipts->device_info.feedback_size;
++
++	buffers = ipts->data;
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		buffers[i].address =
++			dma_alloc_coherent(ipts->dev, data_buffer_size,
++					   &buffers[i].dma_address, GFP_KERNEL);
++
++		if (!buffers[i].address)
++			goto release_resources;
++	}
++
++	buffers = ipts->feedback;
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		buffers[i].address =
++			dma_alloc_coherent(ipts->dev, feedback_buffer_size,
++					   &buffers[i].dma_address, GFP_KERNEL);
++
++		if (!buffers[i].address)
++			goto release_resources;
++	}
++
++	ipts->doorbell.address =
++		dma_alloc_coherent(ipts->dev, sizeof(u32),
++				   &ipts->doorbell.dma_address, GFP_KERNEL);
++
++	if (!ipts->doorbell.address)
++		goto release_resources;
++
++	ipts->workqueue.address =
++		dma_alloc_coherent(ipts->dev, sizeof(u32),
++				   &ipts->workqueue.dma_address, GFP_KERNEL);
++
++	if (!ipts->workqueue.address)
++		goto release_resources;
++
++	ipts->host2me.address =
++		dma_alloc_coherent(ipts->dev, feedback_buffer_size,
++				   &ipts->host2me.dma_address, GFP_KERNEL);
++
++	if (!ipts->host2me.address)
++		goto release_resources;
++
++	return 0;
++
++release_resources:
++
++	ipts_resources_free(ipts);
++	return -ENOMEM;
++}
+diff --git a/drivers/misc/ipts/resources.h b/drivers/misc/ipts/resources.h
+new file mode 100644
+index 000000000000..fdac0eee9156
+--- /dev/null
++++ b/drivers/misc/ipts/resources.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_RESOURCES_H_
++#define _IPTS_RESOURCES_H_
++
++#include "context.h"
++
++int ipts_resources_alloc(struct ipts_context *ipts);
++void ipts_resources_free(struct ipts_context *ipts);
++
++#endif /* _IPTS_RESOURCES_H_ */
+diff --git a/drivers/misc/ipts/uapi.c b/drivers/misc/ipts/uapi.c
+new file mode 100644
+index 000000000000..8107a027223f
+--- /dev/null
++++ b/drivers/misc/ipts/uapi.c
+@@ -0,0 +1,211 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/delay.h>
++#include <linux/uaccess.h>
++#include <linux/types.h>
++
++#include "context.h"
++#include "control.h"
++#include "protocol.h"
++#include "uapi.h"
++
++struct ipts_uapi uapi;
++
++static ssize_t ipts_uapi_read(struct file *file, char __user *buf, size_t count,
++			      loff_t *offset)
++{
++	int buffer;
++	int maxbytes;
++	struct ipts_context *ipts = uapi.ipts;
++
++	buffer = MINOR(file->f_path.dentry->d_inode->i_rdev);
++
++	if (!ipts || ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	maxbytes = ipts->device_info.data_size - *offset;
++	if (maxbytes <= 0 || count > maxbytes)
++		return -EINVAL;
++
++	if (copy_to_user(buf, ipts->data[buffer].address + *offset, count))
++		return -EFAULT;
++
++	return count;
++}
++
++static long ipts_uapi_ioctl_get_device_ready(struct ipts_context *ipts,
++					     unsigned long arg)
++{
++	void __user *buffer = (void __user *)arg;
++	u8 ready = 0;
++
++	if (ipts)
++		ready = ipts->status == IPTS_HOST_STATUS_STARTED;
++
++	if (copy_to_user(buffer, &ready, sizeof(u8)))
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl_get_device_info(struct ipts_context *ipts,
++					    unsigned long arg)
++{
++	struct ipts_device_info info;
++	void __user *buffer = (void __user *)arg;
++
++	if (!ipts || ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	info.vendor = ipts->device_info.vendor_id;
++	info.product = ipts->device_info.device_id;
++	info.version = ipts->device_info.fw_rev;
++	info.buffer_size = ipts->device_info.data_size;
++	info.max_contacts = ipts->device_info.max_contacts;
++
++	if (copy_to_user(buffer, &info, sizeof(struct ipts_device_info)))
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl_get_doorbell(struct ipts_context *ipts,
++					 unsigned long arg)
++{
++	void __user *buffer = (void __user *)arg;
++
++	if (!ipts || ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	if (copy_to_user(buffer, ipts->doorbell.address, sizeof(u32)))
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl_send_feedback(struct ipts_context *ipts,
++					  struct file *file)
++{
++	int ret;
++	struct ipts_feedback_cmd cmd;
++
++	if (!ipts || ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	memset(&cmd, 0, sizeof(struct ipts_feedback_cmd));
++	cmd.buffer = MINOR(file->f_path.dentry->d_inode->i_rdev);
++
++	ret = ipts_control_send(ipts, IPTS_CMD_FEEDBACK, &cmd,
++				sizeof(struct ipts_feedback_cmd));
++
++	if (ret)
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl_send_reset(struct ipts_context *ipts)
++{
++	int ret;
++	struct ipts_reset_sensor_cmd cmd;
++
++	if (!ipts || ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	memset(&cmd, 0, sizeof(struct ipts_reset_sensor_cmd));
++	cmd.type = IPTS_RESET_TYPE_SOFT;
++
++	ret = ipts_control_send(ipts, IPTS_CMD_RESET_SENSOR, &cmd,
++				sizeof(struct ipts_reset_sensor_cmd));
++
++	if (ret)
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl(struct file *file, unsigned int cmd,
++			    unsigned long arg)
++{
++	struct ipts_context *ipts = uapi.ipts;
++
++	switch (cmd) {
++	case IPTS_IOCTL_GET_DEVICE_READY:
++		return ipts_uapi_ioctl_get_device_ready(ipts, arg);
++	case IPTS_IOCTL_GET_DEVICE_INFO:
++		return ipts_uapi_ioctl_get_device_info(ipts, arg);
++	case IPTS_IOCTL_GET_DOORBELL:
++		return ipts_uapi_ioctl_get_doorbell(ipts, arg);
++	case IPTS_IOCTL_SEND_FEEDBACK:
++		return ipts_uapi_ioctl_send_feedback(ipts, file);
++	case IPTS_IOCTL_SEND_RESET:
++		return ipts_uapi_ioctl_send_reset(ipts);
++	default:
++		return -ENOTTY;
++	}
++}
++
++static const struct file_operations ipts_uapi_fops = {
++	.owner = THIS_MODULE,
++	.read = ipts_uapi_read,
++	.unlocked_ioctl = ipts_uapi_ioctl,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = ipts_uapi_ioctl,
++#endif
++};
++
++void ipts_uapi_link(struct ipts_context *ipts)
++{
++	uapi.ipts = ipts;
++}
++
++void ipts_uapi_unlink(void)
++{
++	uapi.ipts = NULL;
++}
++
++int ipts_uapi_init(void)
++{
++	int i, major;
++
++	alloc_chrdev_region(&uapi.dev, 0, IPTS_BUFFERS, "ipts");
++	uapi.class = class_create(THIS_MODULE, "ipts");
++
++	major = MAJOR(uapi.dev);
++
++	cdev_init(&uapi.cdev, &ipts_uapi_fops);
++	uapi.cdev.owner = THIS_MODULE;
++	cdev_add(&uapi.cdev, MKDEV(major, 0), IPTS_BUFFERS);
++
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		device_create(uapi.class, NULL, MKDEV(major, i), NULL,
++			      "ipts/%d", i);
++	}
++
++	return 0;
++}
++
++void ipts_uapi_free(void)
++{
++	int i;
++	int major;
++
++	major = MAJOR(uapi.dev);
++
++	for (i = 0; i < IPTS_BUFFERS; i++)
++		device_destroy(uapi.class, MKDEV(major, i));
++
++	cdev_del(&uapi.cdev);
++
++	unregister_chrdev_region(MKDEV(major, 0), MINORMASK);
++	class_destroy(uapi.class);
++}
+diff --git a/drivers/misc/ipts/uapi.h b/drivers/misc/ipts/uapi.h
+new file mode 100644
+index 000000000000..53fb86a88f97
+--- /dev/null
++++ b/drivers/misc/ipts/uapi.h
+@@ -0,0 +1,47 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_UAPI_H_
++#define _IPTS_UAPI_H_
++
++#include <linux/types.h>
++
++#include "context.h"
++
++struct ipts_uapi {
++	dev_t dev;
++	struct class *class;
++	struct cdev cdev;
++
++	struct ipts_context *ipts;
++};
++
++struct ipts_device_info {
++	__u16 vendor;
++	__u16 product;
++	__u32 version;
++	__u32 buffer_size;
++	__u8 max_contacts;
++
++	/* For future expansion */
++	__u8 reserved[19];
++};
++
++#define IPTS_IOCTL_GET_DEVICE_READY _IOR(0x86, 0x01, __u8)
++#define IPTS_IOCTL_GET_DEVICE_INFO  _IOR(0x86, 0x02, struct ipts_device_info)
++#define IPTS_IOCTL_GET_DOORBELL	    _IOR(0x86, 0x03, __u32)
++#define IPTS_IOCTL_SEND_FEEDBACK    _IO(0x86, 0x04)
++#define IPTS_IOCTL_SEND_RESET	    _IO(0x86, 0x05)
++
++void ipts_uapi_link(struct ipts_context *ipts);
++void ipts_uapi_unlink(void);
++
++int ipts_uapi_init(void);
++void ipts_uapi_free(void);
++
++#endif /* _IPTS_UAPI_H_ */
+-- 
+2.30.1
+
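
Note on the user-space interface added by the IPTS patch above: ipts_uapi_init() creates sixteen character devices, one per data buffer, which a touch-processing daemon (iptsd) consumes by polling the doorbell and reading the buffer it points at. The following sketch is illustrative only and not part of the patch. It assumes the device nodes appear as /dev/ipts/0 through /dev/ipts/15, mirrors the ioctl definitions from uapi.h in user space, and assumes the buffer index cycles with the doorbell value (doorbell modulo 16).

    /* Illustrative user-space sketch; definitions mirrored from uapi.h above. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    struct ipts_device_info {
        uint16_t vendor;
        uint16_t product;
        uint32_t version;
        uint32_t buffer_size;
        uint8_t max_contacts;
        uint8_t reserved[19];
    };

    #define IPTS_IOCTL_GET_DEVICE_INFO _IOR(0x86, 0x02, struct ipts_device_info)
    #define IPTS_IOCTL_GET_DOORBELL    _IOR(0x86, 0x03, uint32_t)
    #define IPTS_IOCTL_SEND_FEEDBACK   _IO(0x86, 0x04)

    int main(void)
    {
        struct ipts_device_info info;
        uint32_t doorbell, seen;
        char path[32];
        char *data;
        int fd, buf;

        fd = open("/dev/ipts/0", O_RDONLY);        /* assumed device node */
        if (fd < 0)
            return 1;

        if (ioctl(fd, IPTS_IOCTL_GET_DEVICE_INFO, &info) < 0)
            return 1;

        data = malloc(info.buffer_size);
        if (!data)
            return 1;

        /* Start from the current doorbell so old buffers are not replayed. */
        if (ioctl(fd, IPTS_IOCTL_GET_DOORBELL, &seen) < 0)
            return 1;

        for (;;) {
            if (ioctl(fd, IPTS_IOCTL_GET_DOORBELL, &doorbell) < 0)
                break;

            for (; seen != doorbell; seen++) {
                /* Assumption: buffer index cycles with the doorbell value. */
                snprintf(path, sizeof(path), "/dev/ipts/%u",
                         (unsigned int)(seen % 16));

                buf = open(path, O_RDONLY);
                if (buf < 0)
                    break;

                if (read(buf, data, info.buffer_size) > 0) {
                    /* ... hand heatmap/stylus data to the processing code ... */
                }

                /* Tell the ME that this buffer may be refilled. */
                ioctl(buf, IPTS_IOCTL_SEND_FEEDBACK);
                close(buf);
            }

            usleep(10 * 1000);
        }

        free(data);
        close(fd);
        return 0;
    }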

+ 334 - 0
patches/5.11/0004-surface-sam-over-hid.patch

@@ -0,0 +1,334 @@
+From e84ca436738d7fd8781f77e7bbcb813f602dce3c Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sat, 25 Jul 2020 17:19:53 +0200
+Subject: [PATCH] i2c: acpi: Implement RawBytes read access
+
+Microsoft Surface Pro 4 and Book 1 devices access the MSHW0030 I2C
+device via a generic serial bus operation region and RawBytes read
+access. On the Surface Book 1, this access is required to turn on (and
+off) the discrete GPU.
+
+There are multiple things to note here:
+
+a) The RawBytes access is device/driver dependent. The ACPI
+   specification states:
+
+   > Raw accesses assume that the writer has knowledge of the bus that
+   > the access is made over and the device that is being accessed. The
+   > protocol may only ensure that the buffer is transmitted to the
+   > appropriate driver, but the driver must be able to interpret the
+   > buffer to communicate to a register.
+
+   Thus this implementation may likely not work on other devices
+   accessing I2C via the RawBytes accessor type.
+
+b) The MSHW0030 I2C device is an HID-over-I2C device which seems to
+   serve multiple functions:
+
+   1. It is the main access point for the legacy-type Surface Aggregator
+      Module (also referred to as SAM-over-HID, as opposed to the newer
+      SAM-over-SSH/UART). It has currently not been determined on how
+      support for the legacy SAM should be implemented. Likely via a
+      custom HID driver.
+
+   2. It seems to serve as the HID device for the Integrated Sensor Hub.
+      This might complicate matters with regards to implementing a
+      SAM-over-HID driver required by legacy SAM.
+
+In light of this, the simplest approach has been chosen for now.
+However, it may make more sense regarding breakage and compatibility to
+either provide functionality for replacing or enhancing the default
+operation region handler via some additional API functions, or even to
+completely blacklist MSHW0030 from the I2C core and provide a custom
+driver for it.
+
+Replacing/enhancing the default operation region handler would, however,
+either require some sort of secondary driver and access point for it,
+from which the new API functions would be called and the new handler
+(part) would be installed, or hard-coding them via some sort of
+quirk-like interface into the I2C core.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Patchset: surface-sam-over-hid
+---
+ drivers/i2c/i2c-core-acpi.c | 35 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 35 insertions(+)
+
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index 37c510d9347a..aed579942436 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -574,6 +574,28 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+ 	return (ret == 1) ? 0 : -EIO;
+ }
+ 
++static int acpi_gsb_i2c_write_raw_bytes(struct i2c_client *client,
++		u8 *data, u8 data_len)
++{
++	struct i2c_msg msgs[1];
++	int ret = AE_OK;
++
++	msgs[0].addr = client->addr;
++	msgs[0].flags = client->flags;
++	msgs[0].len = data_len + 1;
++	msgs[0].buf = data;
++
++	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++
++	if (ret < 0) {
++		dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret);
++		return ret;
++	}
++
++	/* 1 transfer must have completed successfully */
++	return (ret == 1) ? 0 : -EIO;
++}
++
+ static acpi_status
+ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 			u32 bits, u64 *value64,
+@@ -675,6 +697,19 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 		}
+ 		break;
+ 
++	case ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES:
++		if (action == ACPI_READ) {
++			dev_warn(&adapter->dev,
++				 "protocol 0x%02x not supported for client 0x%02x\n",
++				 accessor_type, client->addr);
++			ret = AE_BAD_PARAMETER;
++			goto err;
++		} else {
++			status = acpi_gsb_i2c_write_raw_bytes(client,
++					gsb->data, info->access_length);
++		}
++		break;
++
+ 	default:
+ 		dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n",
+ 			 accessor_type, client->addr);
+-- 
+2.30.1
+
+From 79a4b8747237071f0b4b2bda1d57fcfc730fd824 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sat, 13 Feb 2021 16:41:18 +0100
+Subject: [PATCH] platform/surface: Add driver for Surface Book 1 dGPU switch
+
+Add driver exposing the discrete GPU power-switch of the Microsoft
+Surface Book 1 to user-space.
+
+On the Surface Book 1, the dGPU power is controlled via the Surface
+System Aggregator Module (SAM). The specific SAM-over-HID command for
+this is exposed via ACPI. This module provides a simple driver exposing
+the ACPI call via a sysfs parameter to user-space, so that users can
+easily power-on/-off the dGPU.
+
+Patchset: surface-sam-over-hid
+---
+ drivers/platform/surface/Kconfig              |   7 +
+ drivers/platform/surface/Makefile             |   1 +
+ .../surface/surfacebook1_dgpu_switch.c        | 162 ++++++++++++++++++
+ 3 files changed, 170 insertions(+)
+ create mode 100644 drivers/platform/surface/surfacebook1_dgpu_switch.c
+
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index 2c941cdac9ee..b5dc9148066c 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -41,6 +41,13 @@ config SURFACE_3_POWER_OPREGION
+ 	  This driver provides support for ACPI operation
+ 	  region of the Surface 3 battery platform driver.
+ 
++config SURFACE_BOOK1_DGPU_SWITCH
++	tristate "Surface Book 1 dGPU Switch Driver"
++	depends on SYSFS
++	help
++	  This driver provides a sysfs switch to set the power-state of the
++	  discrete GPU found on the Microsoft Surface Book 1.
++
+ config SURFACE_GPE
+ 	tristate "Surface GPE/Lid Support Driver"
+ 	depends on DMI
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index cedfb027ded1..3d5fa0daa56b 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -7,5 +7,6 @@
+ obj-$(CONFIG_SURFACE3_WMI)		+= surface3-wmi.o
+ obj-$(CONFIG_SURFACE_3_BUTTON)		+= surface3_button.o
+ obj-$(CONFIG_SURFACE_3_POWER_OPREGION)	+= surface3_power.o
++obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
+ obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
+diff --git a/drivers/platform/surface/surfacebook1_dgpu_switch.c b/drivers/platform/surface/surfacebook1_dgpu_switch.c
+new file mode 100644
+index 000000000000..8b816ed8f35c
+--- /dev/null
++++ b/drivers/platform/surface/surfacebook1_dgpu_switch.c
+@@ -0,0 +1,162 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/acpi.h>
++#include <linux/platform_device.h>
++
++
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
++#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
++
++
++static const guid_t dgpu_sw_guid = GUID_INIT(0x6fd05c69, 0xcde3, 0x49f4,
++	0x95, 0xed, 0xab, 0x16, 0x65, 0x49, 0x80, 0x35);
++
++#define DGPUSW_ACPI_PATH_DSM	"\\_SB_.PCI0.LPCB.EC0_.VGBI"
++#define DGPUSW_ACPI_PATH_HGON	"\\_SB_.PCI0.RP05.HGON"
++#define DGPUSW_ACPI_PATH_HGOF	"\\_SB_.PCI0.RP05.HGOF"
++
++
++static int sb1_dgpu_sw_dsmcall(void)
++{
++	union acpi_object *ret;
++	acpi_handle handle;
++	acpi_status status;
++
++	status = acpi_get_handle(NULL, DGPUSW_ACPI_PATH_DSM, &handle);
++	if (status)
++		return -EINVAL;
++
++	ret = acpi_evaluate_dsm_typed(handle, &dgpu_sw_guid, 1, 1, NULL, ACPI_TYPE_BUFFER);
++	if (!ret)
++		return -EINVAL;
++
++	ACPI_FREE(ret);
++	return 0;
++}
++
++static int sb1_dgpu_sw_hgon(void)
++{
++	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
++	acpi_status status;
++
++	status = acpi_evaluate_object(NULL, DGPUSW_ACPI_PATH_HGON, NULL, &buf);
++	if (status) {
++		pr_err("failed to run HGON: %d\n", status);
++		return -EINVAL;
++	}
++
++	if (buf.pointer)
++		ACPI_FREE(buf.pointer);
++
++	pr_info("turned-on dGPU via HGON\n");
++	return 0;
++}
++
++static int sb1_dgpu_sw_hgof(void)
++{
++	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
++	acpi_status status;
++
++	status = acpi_evaluate_object(NULL, DGPUSW_ACPI_PATH_HGOF, NULL, &buf);
++	if (status) {
++		pr_err("failed to run HGOF: %d\n", status);
++		return -EINVAL;
++	}
++
++	if (buf.pointer)
++		ACPI_FREE(buf.pointer);
++
++	pr_info("turned-off dGPU via HGOF\n");
++	return 0;
++}
++
++
++static ssize_t dgpu_dsmcall_store(struct device *dev, struct device_attribute *attr,
++				  const char *buf, size_t len)
++{
++	int status, value;
++
++	status = kstrtoint(buf, 0, &value);
++	if (status < 0)
++		return status;
++
++	if (value != 1)
++		return -EINVAL;
++
++	status = sb1_dgpu_sw_dsmcall();
++
++	return status < 0 ? status : len;
++}
++
++static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr,
++				const char *buf, size_t len)
++{
++	bool power;
++	int status;
++
++	status = kstrtobool(buf, &power);
++	if (status < 0)
++		return status;
++
++	if (power)
++		status = sb1_dgpu_sw_hgon();
++	else
++		status = sb1_dgpu_sw_hgof();
++
++	return status < 0 ? status : len;
++}
++
++static DEVICE_ATTR_WO(dgpu_dsmcall);
++static DEVICE_ATTR_WO(dgpu_power);
++
++static struct attribute *sb1_dgpu_sw_attrs[] = {
++	&dev_attr_dgpu_dsmcall.attr,
++	&dev_attr_dgpu_power.attr,
++	NULL,
++};
++
++static const struct attribute_group sb1_dgpu_sw_attr_group = {
++	.attrs = sb1_dgpu_sw_attrs,
++};
++
++
++static int sb1_dgpu_sw_probe(struct platform_device *pdev)
++{
++	return sysfs_create_group(&pdev->dev.kobj, &sb1_dgpu_sw_attr_group);
++}
++
++static int sb1_dgpu_sw_remove(struct platform_device *pdev)
++{
++	sysfs_remove_group(&pdev->dev.kobj, &sb1_dgpu_sw_attr_group);
++	return 0;
++}
++
++/*
++ * The dGPU power seems to be actually handled by MSHW0040. However, that is
++ * also the power-/volume-button device with a mainline driver. So let's use
++ * MSHW0041 instead for now, which seems to be the LTCH (latch/DTX) device.
++ */
++static const struct acpi_device_id sb1_dgpu_sw_match[] = {
++	{ "MSHW0041", },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, sb1_dgpu_sw_match);
++
++static struct platform_driver sb1_dgpu_sw = {
++	.probe = sb1_dgpu_sw_probe,
++	.remove = sb1_dgpu_sw_remove,
++	.driver = {
++		.name = "surfacebook1_dgpu_switch",
++		.acpi_match_table = sb1_dgpu_sw_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(sb1_dgpu_sw);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Discrete GPU Power-Switch for Surface Book 1");
++MODULE_LICENSE("GPL");
+-- 
+2.30.1
+
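
Usage note for the dGPU switch driver above (illustrative, not part of the patch): the driver matches the ACPI device MSHW0041, so its dgpu_power and dgpu_dsmcall attributes appear on the corresponding platform device in sysfs. The exact path below is an assumption and may differ between systems; dgpu_power accepts any value understood by kstrtobool(), e.g. "0"/"1" or "on"/"off".

    /* Illustrative sketch; the sysfs path is an assumption derived from the
     * MSHW0041 ACPI match in the driver and may differ between systems. */
    #include <stdio.h>

    static int sb1_set_dgpu_power(int on)
    {
        const char *path = "/sys/bus/platform/devices/MSHW0041:00/dgpu_power";
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;

        fprintf(f, "%d\n", on ? 1 : 0);   /* triggers HGON (1) or HGOF (0) */
        return fclose(f) ? -1 : 0;        /* write reaches the driver on close */
    }

    int main(void)
    {
        return sb1_set_dgpu_power(1) ? 1 : 0;   /* power the dGPU on */
    }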

+ 21886 - 0
patches/5.11/0005-surface-sam.patch

@@ -0,0 +1,21886 @@
+From e843aba556dfdcc3eb52a59da2581ba07c129264 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:51 +0100
+Subject: [PATCH] platform/surface: Add Surface Aggregator subsystem
+
+Add Surface System Aggregator Module core and Surface Serial Hub driver,
+required for the embedded controller found on Microsoft Surface devices.
+
+The Surface System Aggregator Module (SSAM, SAM or Surface Aggregator)
+is an embedded controller (EC) found on 4th and later generation
+Microsoft Surface devices, with the exception of the Surface Go series.
+This EC provides various functionality, depending on the device in
+question. This can include battery status and thermal reporting (5th and
+later generations), but also HID keyboard (6th+) and touchpad input
+(7th+) on Surface Laptop and Surface Book 3 series devices.
+
+This patch provides the basic necessities for communication with the SAM
+EC on 5th and later generation devices. On these devices, the EC
+provides an interface that acts as serial device, called the Surface
+Serial Hub (SSH). 4th generation devices, on which the EC interface is
+provided via an HID-over-I2C device, are not supported by this patch.
+
+Specifically, this patch adds a driver for the SSH device (device HID
+MSHW0084 in ACPI), as well as a controller structure and associated API.
+This represents the functional core of the Surface Aggregator kernel
+subsystem, introduced with this patch, and will be expanded upon in
+subsequent commits.
+
+The SSH driver acts as the main attachment point for this subsystem and
+sets up and manages the controller structure. The controller in turn
+provides a basic communication interface, allowing requests to be sent from
+host to EC and the corresponding responses to be received, as well as
+managing and receiving events sent from EC to host. It is structured
+into multiple layers, with the top layer presenting the API used by
+other kernel drivers and the lower layers modeled after the serial
+protocol used for communication.
+
+Said other drivers are then responsible for providing the (Surface model
+specific) functionality accessible through the EC (e.g. battery status
+reporting, thermal information, ...) via said controller structure and
+API, and will be added in future commits.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20201221183959.1186143-2-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ MAINTAINERS                                   |    8 +
+ drivers/platform/surface/Kconfig              |    2 +
+ drivers/platform/surface/Makefile             |    1 +
+ drivers/platform/surface/aggregator/Kconfig   |   42 +
+ drivers/platform/surface/aggregator/Makefile  |   10 +
+ .../platform/surface/aggregator/controller.c  | 2504 +++++++++++++++++
+ .../platform/surface/aggregator/controller.h  |  276 ++
+ drivers/platform/surface/aggregator/core.c    |  787 ++++++
+ .../platform/surface/aggregator/ssh_msgb.h    |  205 ++
+ .../surface/aggregator/ssh_packet_layer.c     | 1710 +++++++++++
+ .../surface/aggregator/ssh_packet_layer.h     |  187 ++
+ .../platform/surface/aggregator/ssh_parser.c  |  228 ++
+ .../platform/surface/aggregator/ssh_parser.h  |  154 +
+ .../surface/aggregator/ssh_request_layer.c    | 1211 ++++++++
+ .../surface/aggregator/ssh_request_layer.h    |  143 +
+ include/linux/surface_aggregator/controller.h |  824 ++++++
+ include/linux/surface_aggregator/serial_hub.h |  672 +++++
+ 17 files changed, 8964 insertions(+)
+ create mode 100644 drivers/platform/surface/aggregator/Kconfig
+ create mode 100644 drivers/platform/surface/aggregator/Makefile
+ create mode 100644 drivers/platform/surface/aggregator/controller.c
+ create mode 100644 drivers/platform/surface/aggregator/controller.h
+ create mode 100644 drivers/platform/surface/aggregator/core.c
+ create mode 100644 drivers/platform/surface/aggregator/ssh_msgb.h
+ create mode 100644 drivers/platform/surface/aggregator/ssh_packet_layer.c
+ create mode 100644 drivers/platform/surface/aggregator/ssh_packet_layer.h
+ create mode 100644 drivers/platform/surface/aggregator/ssh_parser.c
+ create mode 100644 drivers/platform/surface/aggregator/ssh_parser.h
+ create mode 100644 drivers/platform/surface/aggregator/ssh_request_layer.c
+ create mode 100644 drivers/platform/surface/aggregator/ssh_request_layer.h
+ create mode 100644 include/linux/surface_aggregator/controller.h
+ create mode 100644 include/linux/surface_aggregator/serial_hub.h
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index bfc1b86e3e73..2818a31d79db 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11807,6 +11807,14 @@ L:	platform-driver-x86@vger.kernel.org
+ S:	Supported
+ F:	drivers/platform/surface/surfacepro3_button.c
+ 
++MICROSOFT SURFACE SYSTEM AGGREGATOR SUBSYSTEM
++M:	Maximilian Luz <luzmaximilian@gmail.com>
++S:	Maintained
++W:	https://github.com/linux-surface/surface-aggregator-module
++C:	irc://chat.freenode.net/##linux-surface
++F:	drivers/platform/surface/aggregator/
++F:	include/linux/surface_aggregator/
++
+ MICROTEK X6 SCANNER
+ M:	Oliver Neukum <oliver@neukum.org>
+ S:	Maintained
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index b5dc9148066c..ef6b4051e7c8 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -63,4 +63,6 @@ config SURFACE_PRO3_BUTTON
+ 	help
+ 	  This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
+ 
++source "drivers/platform/surface/aggregator/Kconfig"
++
+ endif # SURFACE_PLATFORMS
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index 3d5fa0daa56b..c5392098cfb9 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -7,6 +7,7 @@
+ obj-$(CONFIG_SURFACE3_WMI)		+= surface3-wmi.o
+ obj-$(CONFIG_SURFACE_3_BUTTON)		+= surface3_button.o
+ obj-$(CONFIG_SURFACE_3_POWER_OPREGION)	+= surface3_power.o
++obj-$(CONFIG_SURFACE_AGGREGATOR)	+= aggregator/
+ obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
+ obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
+diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
+new file mode 100644
+index 000000000000..e9f4ad96e40a
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/Kconfig
+@@ -0,0 +1,42 @@
++# SPDX-License-Identifier: GPL-2.0+
++# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++
++menuconfig SURFACE_AGGREGATOR
++	tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
++	depends on SERIAL_DEV_BUS
++	select CRC_CCITT
++	help
++	  The Surface System Aggregator Module (Surface SAM or SSAM) is an
++	  embedded controller (EC) found on 5th- and later-generation Microsoft
++	  Surface devices (i.e. Surface Pro 5, Surface Book 2, Surface Laptop,
++	  and newer, with exception of Surface Go series devices).
++
++	  Depending on the device in question, this EC provides varying
++	  functionality, including:
++	  - EC access from ACPI via Surface ACPI Notify (5th- and 6th-generation)
++	  - battery status information (all devices)
++	  - thermal sensor access (all devices)
++	  - performance mode / cooling mode control (all devices)
++	  - clipboard detachment system control (Surface Book 2 and 3)
++	  - HID / keyboard input (Surface Laptops, Surface Book 3)
++
++	  This option controls whether the Surface SAM subsystem core will be
++	  built. This includes a driver for the Surface Serial Hub (SSH), which
++	  is the device responsible for the communication with the EC, and a
++	  basic kernel interface exposing the EC functionality to other client
++	  drivers, i.e. allowing them to make requests to the EC and receive
++	  events from it. Selecting this option alone will not provide any
++	  client drivers and therefore no functionality beyond the in-kernel
++	  interface. Said functionality is the responsibility of the respective
++	  client drivers.
++
++	  Note: While 4th-generation Surface devices also make use of a SAM EC,
++	  due to a difference in the communication interface of the controller,
++	  only 5th and later generations are currently supported. Specifically,
++	  devices using SAM-over-SSH are supported, whereas devices using
++	  SAM-over-HID, which is used on the 4th generation, are currently not
++	  supported.
++
++	  Choose m if you want to build the SAM subsystem core and SSH driver as
++	  module, y if you want to build it into the kernel and n if you don't
++	  want it at all.
+diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
+new file mode 100644
+index 000000000000..faad18d4a7f2
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/Makefile
+@@ -0,0 +1,10 @@
++# SPDX-License-Identifier: GPL-2.0+
++# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++
++obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
++
++surface_aggregator-objs := core.o
++surface_aggregator-objs += ssh_parser.o
++surface_aggregator-objs += ssh_packet_layer.o
++surface_aggregator-objs += ssh_request_layer.o
++surface_aggregator-objs += controller.o
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+new file mode 100644
+index 000000000000..488318cf2098
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -0,0 +1,2504 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Main SSAM/SSH controller structure and functionality.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/atomic.h>
++#include <linux/completion.h>
++#include <linux/gpio/consumer.h>
++#include <linux/interrupt.h>
++#include <linux/kref.h>
++#include <linux/limits.h>
++#include <linux/list.h>
++#include <linux/lockdep.h>
++#include <linux/mutex.h>
++#include <linux/rculist.h>
++#include <linux/rbtree.h>
++#include <linux/rwsem.h>
++#include <linux/serdev.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/srcu.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/serial_hub.h>
++
++#include "controller.h"
++#include "ssh_msgb.h"
++#include "ssh_request_layer.h"
++
++
++/* -- Safe counters. -------------------------------------------------------- */
++
++/**
++ * ssh_seq_reset() - Reset/initialize sequence ID counter.
++ * @c: The counter to reset.
++ */
++static void ssh_seq_reset(struct ssh_seq_counter *c)
++{
++	WRITE_ONCE(c->value, 0);
++}
++
++/**
++ * ssh_seq_next() - Get next sequence ID.
++ * @c: The counter providing the sequence IDs.
++ *
++ * Return: Returns the next sequence ID of the counter.
++ */
++static u8 ssh_seq_next(struct ssh_seq_counter *c)
++{
++	u8 old = READ_ONCE(c->value);
++	u8 new = old + 1;
++	u8 ret;
++
++	while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
++		old = ret;
++		new = old + 1;
++	}
++
++	return old;
++}
++
++/**
++ * ssh_rqid_reset() - Reset/initialize request ID counter.
++ * @c: The counter to reset.
++ */
++static void ssh_rqid_reset(struct ssh_rqid_counter *c)
++{
++	WRITE_ONCE(c->value, 0);
++}
++
++/**
++ * ssh_rqid_next() - Get next request ID.
++ * @c: The counter providing the request IDs.
++ *
++ * Return: Returns the next request ID of the counter, skipping any reserved
++ * request IDs.
++ */
++static u16 ssh_rqid_next(struct ssh_rqid_counter *c)
++{
++	u16 old = READ_ONCE(c->value);
++	u16 new = ssh_rqid_next_valid(old);
++	u16 ret;
++
++	while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
++		old = ret;
++		new = ssh_rqid_next_valid(old);
++	}
++
++	return old;
++}
++
++
++/* -- Event notifier/callbacks. --------------------------------------------- */
++/*
++ * The notifier system is based on linux/notifier.h, specifically the SRCU
++ * implementation. The difference is that some bits of the notifier
++ * call return value can be tracked across multiple calls. This is done so
++ * that handling of events can be tracked and a warning can be issued in case
++ * an event goes unhandled. The idea of that warning is that it should help
++ * discover and identify new/currently unimplemented features.
++ */
++
++/**
++ * ssam_event_matches_notifier() - Test if an event matches a notifier.
++ * @n: The event notifier to test against.
++ * @event: The event to test.
++ *
++ * Return: Returns %true if the given event matches the given notifier
++ * according to the rules set in the notifier's event mask, %false otherwise.
++ */
++static bool ssam_event_matches_notifier(const struct ssam_event_notifier *n,
++					const struct ssam_event *event)
++{
++	bool match = n->event.id.target_category == event->target_category;
++
++	if (n->event.mask & SSAM_EVENT_MASK_TARGET)
++		match &= n->event.reg.target_id == event->target_id;
++
++	if (n->event.mask & SSAM_EVENT_MASK_INSTANCE)
++		match &= n->event.id.instance == event->instance_id;
++
++	return match;
++}
++
++/**
++ * ssam_nfblk_call_chain() - Call event notifier callbacks of the given chain.
++ * @nh:    The notifier head for which the notifier callbacks should be called.
++ * @event: The event data provided to the callbacks.
++ *
++ * Call all registered notifier callbacks in order of their priority until
++ * either no notifier is left or a notifier returns a value with the
++ * %SSAM_NOTIF_STOP bit set. Note that this bit is automatically set via
++ * ssam_notifier_from_errno() on any non-zero error value.
++ *
++ * Return: Returns the notifier status value, which contains the notifier
++ * status bits (%SSAM_NOTIF_HANDLED and %SSAM_NOTIF_STOP) as well as a
++ * potential error value returned from the last executed notifier callback.
++ * Use ssam_notifier_to_errno() to convert this value to the original error
++ * value.
++ */
++static int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
++{
++	struct ssam_event_notifier *nf;
++	int ret = 0, idx;
++
++	idx = srcu_read_lock(&nh->srcu);
++
++	list_for_each_entry_rcu(nf, &nh->head, base.node,
++				srcu_read_lock_held(&nh->srcu)) {
++		if (ssam_event_matches_notifier(nf, event)) {
++			ret = (ret & SSAM_NOTIF_STATE_MASK) | nf->base.fn(nf, event);
++			if (ret & SSAM_NOTIF_STOP)
++				break;
++		}
++	}
++
++	srcu_read_unlock(&nh->srcu, idx);
++	return ret;
++}
++
++/**
++ * ssam_nfblk_insert() - Insert a new notifier block into the given notifier
++ * list.
++ * @nh: The notifier head into which the block should be inserted.
++ * @nb: The notifier block to add.
++ *
++ * Note: This function must be synchronized by the caller with respect to other
++ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
++ *
++ * Return: Returns zero on success, %-EEXIST if the notifier block has already
++ * been registered.
++ */
++static int ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
++{
++	struct ssam_notifier_block *p;
++	struct list_head *h;
++
++	/* Runs under lock, no need for RCU variant. */
++	list_for_each(h, &nh->head) {
++		p = list_entry(h, struct ssam_notifier_block, node);
++
++		if (unlikely(p == nb)) {
++			WARN(1, "double register detected");
++			return -EEXIST;
++		}
++
++		if (nb->priority > p->priority)
++			break;
++	}
++
++	list_add_tail_rcu(&nb->node, h);
++	return 0;
++}
++
++/**
++ * ssam_nfblk_find() - Check if a notifier block is registered on the given
++ * notifier head.
++ *
++ * @nh: The notifier head on which to search.
++ * @nb: The notifier block to search for.
++ *
++ * Note: This function must be synchronized by the caller with respect to other
++ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
++ *
++ * Return: Returns true if the given notifier block is registered on the given
++ * notifier head, false otherwise.
++ */
++static bool ssam_nfblk_find(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
++{
++	struct ssam_notifier_block *p;
++
++	/* Runs under lock, no need for RCU variant. */
++	list_for_each_entry(p, &nh->head, node) {
++		if (p == nb)
++			return true;
++	}
++
++	return false;
++}
++
++/**
++ * ssam_nfblk_remove() - Remove a notifier block from its notifier list.
++ * @nb: The notifier block to be removed.
++ *
++ * Note: This function must be synchronized by the caller with respect to
++ * other insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
++ * Furthermore, the caller _must_ ensure SRCU synchronization by calling
++ * synchronize_srcu() with ``nh->srcu`` after leaving the critical section, to
++ * ensure that the removed notifier block is not in use any more.
++ */
++static void ssam_nfblk_remove(struct ssam_notifier_block *nb)
++{
++	list_del_rcu(&nb->node);
++}
++
++/**
++ * ssam_nf_head_init() - Initialize the given notifier head.
++ * @nh: The notifier head to initialize.
++ */
++static int ssam_nf_head_init(struct ssam_nf_head *nh)
++{
++	int status;
++
++	status = init_srcu_struct(&nh->srcu);
++	if (status)
++		return status;
++
++	INIT_LIST_HEAD(&nh->head);
++	return 0;
++}
++
++/**
++ * ssam_nf_head_destroy() - Deinitialize the given notifier head.
++ * @nh: The notifier head to deinitialize.
++ */
++static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
++{
++	cleanup_srcu_struct(&nh->srcu);
++}
++
++
++/* -- Event/notification registry. ------------------------------------------ */
++
++/**
++ * struct ssam_nf_refcount_key - Key used for event activation reference
++ * counting.
++ * @reg: The registry via which the event is enabled/disabled.
++ * @id:  The ID uniquely describing the event.
++ */
++struct ssam_nf_refcount_key {
++	struct ssam_event_registry reg;
++	struct ssam_event_id id;
++};
++
++/**
++ * struct ssam_nf_refcount_entry - RB-tree entry for reference counting event
++ * activations.
++ * @node:     The node of this entry in the rb-tree.
++ * @key:      The key of the event.
++ * @refcount: The reference-count of the event.
++ * @flags:    The flags used when enabling the event.
++ */
++struct ssam_nf_refcount_entry {
++	struct rb_node node;
++	struct ssam_nf_refcount_key key;
++	int refcount;
++	u8 flags;
++};
++
++/**
++ * ssam_nf_refcount_inc() - Increment reference-/activation-count of the given
++ * event.
++ * @nf:  The notifier system reference.
++ * @reg: The registry used to enable/disable the event.
++ * @id:  The event ID.
++ *
++ * Increments the reference-/activation-count associated with the specified
++ * event type/ID, allocating a new entry for this event ID if necessary. A
++ * newly allocated entry will have a refcount of one.
++ *
++ * Note: ``nf->lock`` must be held when calling this function.
++ *
++ * Return: Returns the refcount entry on success. Returns an error pointer
++ * with %-ENOSPC if there have already been %INT_MAX events of the specified
++ * ID and type registered, or %-ENOMEM if the entry could not be allocated.
++ */
++static struct ssam_nf_refcount_entry *
++ssam_nf_refcount_inc(struct ssam_nf *nf, struct ssam_event_registry reg,
++		     struct ssam_event_id id)
++{
++	struct ssam_nf_refcount_entry *entry;
++	struct ssam_nf_refcount_key key;
++	struct rb_node **link = &nf->refcount.rb_node;
++	struct rb_node *parent = NULL;
++	int cmp;
++
++	lockdep_assert_held(&nf->lock);
++
++	key.reg = reg;
++	key.id = id;
++
++	while (*link) {
++		entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
++		parent = *link;
++
++		cmp = memcmp(&key, &entry->key, sizeof(key));
++		if (cmp < 0) {
++			link = &(*link)->rb_left;
++		} else if (cmp > 0) {
++			link = &(*link)->rb_right;
++		} else if (entry->refcount < INT_MAX) {
++			entry->refcount++;
++			return entry;
++		} else {
++			WARN_ON(1);
++			return ERR_PTR(-ENOSPC);
++		}
++	}
++
++	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
++	if (!entry)
++		return ERR_PTR(-ENOMEM);
++
++	entry->key = key;
++	entry->refcount = 1;
++
++	rb_link_node(&entry->node, parent, link);
++	rb_insert_color(&entry->node, &nf->refcount);
++
++	return entry;
++}
++
++/**
++ * ssam_nf_refcount_dec() - Decrement reference-/activation-count of the given
++ * event.
++ * @nf:  The notifier system reference.
++ * @reg: The registry used to enable/disable the event.
++ * @id:  The event ID.
++ *
++ * Decrements the reference-/activation-count of the specified event,
++ * returning its entry. If the returned entry has a refcount of zero, the
++ * caller is responsible for freeing it using kfree().
++ *
++ * Note: ``nf->lock`` must be held when calling this function.
++ *
++ * Return: Returns the refcount entry on success or %NULL if the entry has not
++ * been found.
++ */
++static struct ssam_nf_refcount_entry *
++ssam_nf_refcount_dec(struct ssam_nf *nf, struct ssam_event_registry reg,
++		     struct ssam_event_id id)
++{
++	struct ssam_nf_refcount_entry *entry;
++	struct ssam_nf_refcount_key key;
++	struct rb_node *node = nf->refcount.rb_node;
++	int cmp;
++
++	lockdep_assert_held(&nf->lock);
++
++	key.reg = reg;
++	key.id = id;
++
++	while (node) {
++		entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
++
++		cmp = memcmp(&key, &entry->key, sizeof(key));
++		if (cmp < 0) {
++			node = node->rb_left;
++		} else if (cmp > 0) {
++			node = node->rb_right;
++		} else {
++			entry->refcount--;
++			if (entry->refcount == 0)
++				rb_erase(&entry->node, &nf->refcount);
++
++			return entry;
++		}
++	}
++
++	return NULL;
++}
++
++/**
++ * ssam_nf_refcount_empty() - Test if the notification system has any
++ * enabled/active events.
++ * @nf: The notification system.
++ */
++static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
++{
++	return RB_EMPTY_ROOT(&nf->refcount);
++}
++
++/**
++ * ssam_nf_call() - Call notification callbacks for the provided event.
++ * @nf:    The notifier system
++ * @dev:   The associated device, only used for logging.
++ * @rqid:  The request ID of the event.
++ * @event: The event provided to the callbacks.
++ *
++ * Execute registered callbacks in order of their priority until either no
++ * callback is left or a callback returns a value with the %SSAM_NOTIF_STOP
++ * bit set. Note that this bit is set automatically when converting non-zero
++ * error values via ssam_notifier_from_errno() to notifier values.
++ *
++ * Also note that any callback that could handle an event should return a value
++ * with bit %SSAM_NOTIF_HANDLED set, indicating that the event does not go
++ * unhandled/ignored. In case no registered callback could handle an event,
++ * this function will emit a warning.
++ *
++ * In case a callback failed, this function will emit an error message.
++ */
++static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
++			 struct ssam_event *event)
++{
++	struct ssam_nf_head *nf_head;
++	int status, nf_ret;
++
++	if (!ssh_rqid_is_event(rqid)) {
++		dev_warn(dev, "event: unsupported rqid: %#06x\n", rqid);
++		return;
++	}
++
++	nf_head = &nf->head[ssh_rqid_to_event(rqid)];
++	nf_ret = ssam_nfblk_call_chain(nf_head, event);
++	status = ssam_notifier_to_errno(nf_ret);
++
++	if (status < 0) {
++		dev_err(dev,
++			"event: error handling event: %d (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
++			status, event->target_category, event->target_id,
++			event->command_id, event->instance_id);
++	} else if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
++		dev_warn(dev,
++			 "event: unhandled event (rqid: %#04x, tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
++			 rqid, event->target_category, event->target_id,
++			 event->command_id, event->instance_id);
++	}
++}
++
++/**
++ * ssam_nf_init() - Initialize the notifier system.
++ * @nf: The notifier system to initialize.
++ */
++static int ssam_nf_init(struct ssam_nf *nf)
++{
++	int i, status;
++
++	for (i = 0; i < SSH_NUM_EVENTS; i++) {
++		status = ssam_nf_head_init(&nf->head[i]);
++		if (status)
++			break;
++	}
++
++	if (status) {
++		while (i--)
++			ssam_nf_head_destroy(&nf->head[i]);
++
++		return status;
++	}
++
++	mutex_init(&nf->lock);
++	return 0;
++}
++
++/**
++ * ssam_nf_destroy() - Deinitialize the notifier system.
++ * @nf: The notifier system to deinitialize.
++ */
++static void ssam_nf_destroy(struct ssam_nf *nf)
++{
++	int i;
++
++	for (i = 0; i < SSH_NUM_EVENTS; i++)
++		ssam_nf_head_destroy(&nf->head[i]);
++
++	mutex_destroy(&nf->lock);
++}
++
++
++/* -- Event/async request completion system. -------------------------------- */
++
++#define SSAM_CPLT_WQ_NAME	"ssam_cpltq"
++
++/*
++ * SSAM_CPLT_WQ_BATCH - Maximum number of event item completions executed per
++ * work execution. Used to prevent livelocking of the workqueue. Value chosen
++ * via educated guess, may be adjusted.
++ */
++#define SSAM_CPLT_WQ_BATCH	10
++
++/**
++ * ssam_event_item_alloc() - Allocate an event item with the given payload size.
++ * @len:   The event payload length.
++ * @flags: The flags used for allocation.
++ *
++ * Allocate an event item with the given payload size. Sets the item
++ * operations and payload length values. The item free callback (``ops.free``)
++ * should not be overwritten after this call.
++ *
++ * Return: Returns the newly allocated event item.
++ */
++static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
++{
++	struct ssam_event_item *item;
++
++	item = kzalloc(struct_size(item, event.data, len), flags);
++	if (!item)
++		return NULL;
++
++	item->event.length = len;
++	return item;
++}
++
++/**
++ * ssam_event_queue_push() - Push an event item to the event queue.
++ * @q:    The event queue.
++ * @item: The item to add.
++ */
++static void ssam_event_queue_push(struct ssam_event_queue *q,
++				  struct ssam_event_item *item)
++{
++	spin_lock(&q->lock);
++	list_add_tail(&item->node, &q->head);
++	spin_unlock(&q->lock);
++}
++
++/**
++ * ssam_event_queue_pop() - Pop the next event item from the event queue.
++ * @q: The event queue.
++ *
++ * Returns and removes the next event item from the queue. Returns %NULL if
++ * there is no event item left.
++ */
++static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
++{
++	struct ssam_event_item *item;
++
++	spin_lock(&q->lock);
++	item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
++	if (item)
++		list_del(&item->node);
++	spin_unlock(&q->lock);
++
++	return item;
++}
++
++/**
++ * ssam_event_queue_is_empty() - Check if the event queue is empty.
++ * @q: The event queue.
++ */
++static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
++{
++	bool empty;
++
++	spin_lock(&q->lock);
++	empty = list_empty(&q->head);
++	spin_unlock(&q->lock);
++
++	return empty;
++}
++
++/**
++ * ssam_cplt_get_event_queue() - Get the event queue for the given parameters.
++ * @cplt: The completion system on which to look for the queue.
++ * @tid:  The target ID of the queue.
++ * @rqid: The request ID representing the event ID for which to get the queue.
++ *
++ * Return: Returns the event queue corresponding to the event type described
++ * by the given parameters. If the request ID does not represent an event,
++ * this function returns %NULL. If the target ID is not supported, this
++ * function will fall back to the default target ID (``tid = 1``).
++ */
++static
++struct ssam_event_queue *ssam_cplt_get_event_queue(struct ssam_cplt *cplt,
++						   u8 tid, u16 rqid)
++{
++	u16 event = ssh_rqid_to_event(rqid);
++	u16 tidx = ssh_tid_to_index(tid);
++
++	if (!ssh_rqid_is_event(rqid)) {
++		dev_err(cplt->dev, "event: unsupported request ID: %#06x\n", rqid);
++		return NULL;
++	}
++
++	if (!ssh_tid_is_valid(tid)) {
++		dev_warn(cplt->dev, "event: unsupported target ID: %u\n", tid);
++		tidx = 0;
++	}
++
++	return &cplt->event.target[tidx].queue[event];
++}
++
++/**
++ * ssam_cplt_submit() - Submit a work item to the completion system workqueue.
++ * @cplt: The completion system.
++ * @work: The work item to submit.
++ */
++static bool ssam_cplt_submit(struct ssam_cplt *cplt, struct work_struct *work)
++{
++	return queue_work(cplt->wq, work);
++}
++
++/**
++ * ssam_cplt_submit_event() - Submit an event to the completion system.
++ * @cplt: The completion system.
++ * @item: The event item to submit.
++ *
++ * Submits the event to the completion system by queuing it on the event item
++ * queue and queuing the respective event queue work item on the completion
++ * workqueue, which will eventually complete the event.
++ *
++ * Return: Returns zero on success, %-EINVAL if there is no event queue that
++ * can handle the given event item.
++ */
++static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
++				  struct ssam_event_item *item)
++{
++	struct ssam_event_queue *evq;
++
++	evq = ssam_cplt_get_event_queue(cplt, item->event.target_id, item->rqid);
++	if (!evq)
++		return -EINVAL;
++
++	ssam_event_queue_push(evq, item);
++	ssam_cplt_submit(cplt, &evq->work);
++	return 0;
++}
++
++/**
++ * ssam_cplt_flush() - Flush the completion system.
++ * @cplt: The completion system.
++ *
++ * Flush the completion system by waiting until all currently submitted work
++ * items have been completed.
++ *
++ * Note: This function does not guarantee that all events will have been
++ * handled once this call terminates. In case of a larger number of
++ * to-be-completed events, the event queue work function may re-schedule its
++ * work item, which this flush operation will ignore.
++ *
++ * This operation is only intended to, during normal operation prior to
++ * shutdown, try to complete most events and requests to get them out of the
++ * system while the system is still fully operational. It does not aim to
++ * provide any guarantee that all of them have been handled.
++ */
++static void ssam_cplt_flush(struct ssam_cplt *cplt)
++{
++	flush_workqueue(cplt->wq);
++}
++
++static void ssam_event_queue_work_fn(struct work_struct *work)
++{
++	struct ssam_event_queue *queue;
++	struct ssam_event_item *item;
++	struct ssam_nf *nf;
++	struct device *dev;
++	unsigned int iterations = SSAM_CPLT_WQ_BATCH;
++
++	queue = container_of(work, struct ssam_event_queue, work);
++	nf = &queue->cplt->event.notif;
++	dev = queue->cplt->dev;
++
++	/* Limit number of processed events to avoid livelocking. */
++	do {
++		item = ssam_event_queue_pop(queue);
++		if (!item)
++			return;
++
++		ssam_nf_call(nf, dev, item->rqid, &item->event);
++		kfree(item);
++	} while (--iterations);
++
++	if (!ssam_event_queue_is_empty(queue))
++		ssam_cplt_submit(queue->cplt, &queue->work);
++}
++
++/**
++ * ssam_event_queue_init() - Initialize an event queue.
++ * @cplt: The completion system on which the queue resides.
++ * @evq:  The event queue to initialize.
++ */
++static void ssam_event_queue_init(struct ssam_cplt *cplt,
++				  struct ssam_event_queue *evq)
++{
++	evq->cplt = cplt;
++	spin_lock_init(&evq->lock);
++	INIT_LIST_HEAD(&evq->head);
++	INIT_WORK(&evq->work, ssam_event_queue_work_fn);
++}
++
++/**
++ * ssam_cplt_init() - Initialize completion system.
++ * @cplt: The completion system to initialize.
++ * @dev:  The device used for logging.
++ */
++static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
++{
++	struct ssam_event_target *target;
++	int status, c, i;
++
++	cplt->dev = dev;
++
++	cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
++	if (!cplt->wq)
++		return -ENOMEM;
++
++	for (c = 0; c < ARRAY_SIZE(cplt->event.target); c++) {
++		target = &cplt->event.target[c];
++
++		for (i = 0; i < ARRAY_SIZE(target->queue); i++)
++			ssam_event_queue_init(cplt, &target->queue[i]);
++	}
++
++	status = ssam_nf_init(&cplt->event.notif);
++	if (status)
++		destroy_workqueue(cplt->wq);
++
++	return status;
++}
++
++/**
++ * ssam_cplt_destroy() - Deinitialize the completion system.
++ * @cplt: The completion system to deinitialize.
++ *
++ * Deinitialize the given completion system and ensure that all pending, i.e.
++ * yet-to-be-completed, event items and requests have been handled.
++ */
++static void ssam_cplt_destroy(struct ssam_cplt *cplt)
++{
++	/*
++	 * Note: destroy_workqueue ensures that all currently queued work will
++	 * be fully completed and the workqueue drained. This means that this
++	 * call will inherently also free any queued ssam_event_items, thus we
++	 * don't have to take care of that here explicitly.
++	 */
++	destroy_workqueue(cplt->wq);
++	ssam_nf_destroy(&cplt->event.notif);
++}
++
++
++/* -- Main SSAM device structures. ------------------------------------------ */
++
++/**
++ * ssam_controller_device() - Get the &struct device associated with this
++ * controller.
++ * @c: The controller for which to get the device.
++ *
++ * Return: Returns the &struct device associated with this controller,
++ * providing its lower-level transport.
++ */
++struct device *ssam_controller_device(struct ssam_controller *c)
++{
++	return ssh_rtl_get_device(&c->rtl);
++}
++EXPORT_SYMBOL_GPL(ssam_controller_device);
++
++static void __ssam_controller_release(struct kref *kref)
++{
++	struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
++
++	/*
++	 * The lock-call here is to satisfy lockdep. At this point we really
++	 * expect this to be the last remaining reference to the controller.
++	 * Anything else is a bug.
++	 */
++	ssam_controller_lock(ctrl);
++	ssam_controller_destroy(ctrl);
++	ssam_controller_unlock(ctrl);
++
++	kfree(ctrl);
++}
++
++/**
++ * ssam_controller_get() - Increment reference count of controller.
++ * @c: The controller.
++ *
++ * Return: Returns the controller provided as input.
++ */
++struct ssam_controller *ssam_controller_get(struct ssam_controller *c)
++{
++	if (c)
++		kref_get(&c->kref);
++	return c;
++}
++EXPORT_SYMBOL_GPL(ssam_controller_get);
++
++/**
++ * ssam_controller_put() - Decrement reference count of controller.
++ * @c: The controller.
++ */
++void ssam_controller_put(struct ssam_controller *c)
++{
++	if (c)
++		kref_put(&c->kref, __ssam_controller_release);
++}
++EXPORT_SYMBOL_GPL(ssam_controller_put);
++
++/**
++ * ssam_controller_statelock() - Lock the controller against state transitions.
++ * @c: The controller to lock.
++ *
++ * Lock the controller against state transitions. Holding this lock guarantees
++ * that the controller will not transition between states, i.e. if the
++ * controller is in state "started", when this lock has been acquired, it will
++ * remain in this state at least until the lock has been released.
++ *
++ * Multiple clients may concurrently hold this lock. In other words: The
++ * ``statelock`` functions represent the read-lock part of a r/w-semaphore.
++ * Actions causing state transitions of the controller must be executed while
++ * holding the write-part of this r/w-semaphore (see ssam_controller_lock()
++ * and ssam_controller_unlock() for that).
++ *
++ * See ssam_controller_stateunlock() for the corresponding unlock function.
++ */
++void ssam_controller_statelock(struct ssam_controller *c)
++{
++	down_read(&c->lock);
++}
++EXPORT_SYMBOL_GPL(ssam_controller_statelock);
++
++/**
++ * ssam_controller_stateunlock() - Unlock controller state transitions.
++ * @c: The controller to unlock.
++ *
++ * See ssam_controller_statelock() for the corresponding lock function.
++ */
++void ssam_controller_stateunlock(struct ssam_controller *c)
++{
++	up_read(&c->lock);
++}
++EXPORT_SYMBOL_GPL(ssam_controller_stateunlock);
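++
++/*
++ * Usage sketch (hypothetical client code): the statelock pair brackets code
++ * that must not race with a controller state transition, e.g. submitting a
++ * request on a controller that has to remain "started":
++ *
++ *    ssam_controller_statelock(ctrl);
++ *    status = ssam_request_sync_submit(ctrl, rqst);
++ *    ssam_controller_stateunlock(ctrl);
++ *
++ * Where possible, ssam_client_link() should be preferred over holding the
++ * state lock for longer sections (see ssam_request_sync_submit() below).
++ */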
++
++/**
++ * ssam_controller_lock() - Acquire the main controller lock.
++ * @c: The controller to lock.
++ *
++ * This lock must be held for any state transitions, including transition to
++ * suspend/resumed states and during shutdown. See ssam_controller_statelock()
++ * for more details on controller locking.
++ *
++ * See ssam_controller_unlock() for the corresponding unlock function.
++ */
++void ssam_controller_lock(struct ssam_controller *c)
++{
++	down_write(&c->lock);
++}
++
++/*
++ * ssam_controller_unlock() - Release the main controller lock.
++ * @c: The controller to unlock.
++ *
++ * See ssam_controller_lock() for the corresponding lock function.
++ */
++void ssam_controller_unlock(struct ssam_controller *c)
++{
++	up_write(&c->lock);
++}
++
++static void ssam_handle_event(struct ssh_rtl *rtl,
++			      const struct ssh_command *cmd,
++			      const struct ssam_span *data)
++{
++	struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
++	struct ssam_event_item *item;
++
++	item = ssam_event_item_alloc(data->len, GFP_KERNEL);
++	if (!item)
++		return;
++
++	item->rqid = get_unaligned_le16(&cmd->rqid);
++	item->event.target_category = cmd->tc;
++	item->event.target_id = cmd->tid_in;
++	item->event.command_id = cmd->cid;
++	item->event.instance_id = cmd->iid;
++	memcpy(&item->event.data[0], data->ptr, data->len);
++
++	if (WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item)))
++		kfree(item);
++}
++
++static const struct ssh_rtl_ops ssam_rtl_ops = {
++	.handle_event = ssam_handle_event,
++};
++
++static bool ssam_notifier_is_empty(struct ssam_controller *ctrl);
++static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
++
++#define SSAM_SSH_DSM_REVISION	0
++
++/* d5e383e1-d892-4a76-89fc-f6aaae7ed5b5 */
++static const guid_t SSAM_SSH_DSM_GUID =
++	GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
++		  0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
++
++enum ssh_dsm_fn {
++	SSH_DSM_FN_SSH_POWER_PROFILE             = 0x05,
++	SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT  = 0x06,
++	SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT = 0x07,
++	SSH_DSM_FN_D3_CLOSES_HANDLE              = 0x08,
++	SSH_DSM_FN_SSH_BUFFER_SIZE               = 0x09,
++};
++
++static int ssam_dsm_get_functions(acpi_handle handle, u64 *funcs)
++{
++	union acpi_object *obj;
++	u64 mask = 0;
++	int i;
++
++	*funcs = 0;
++
++	/*
++	 * The _DSM function is only present on newer models. It is not
++	 * present on 5th and 6th generation devices (i.e. up to and including
++	 * Surface Pro 6, Surface Laptop 2, Surface Book 2).
++	 *
++	 * If the _DSM is not present, indicate that no function is supported.
++	 * This will result in default values being set.
++	 */
++	if (!acpi_has_method(handle, "_DSM"))
++		return 0;
++
++	obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
++				      SSAM_SSH_DSM_REVISION, 0, NULL,
++				      ACPI_TYPE_BUFFER);
++	if (!obj)
++		return -EIO;
++
++	for (i = 0; i < obj->buffer.length && i < 8; i++)
++		mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
++
++	if (mask & BIT(0))
++		*funcs = mask;
++
++	ACPI_FREE(obj);
++	return 0;
++}
++
++static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
++{
++	union acpi_object *obj;
++	u64 val;
++
++	if (!(funcs & BIT(func)))
++		return 0; /* Not supported, leave *ret at its default value */
++
++	obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
++				      SSAM_SSH_DSM_REVISION, func, NULL,
++				      ACPI_TYPE_INTEGER);
++	if (!obj)
++		return -EIO;
++
++	val = obj->integer.value;
++	ACPI_FREE(obj);
++
++	if (val > U32_MAX)
++		return -ERANGE;
++
++	*ret = val;
++	return 0;
++}
++
++/**
++ * ssam_controller_caps_load_from_acpi() - Load controller capabilities from
++ * ACPI _DSM.
++ * @handle: The handle of the ACPI controller/SSH device.
++ * @caps:   Where to store the capabilities.
++ *
++ * Initializes the given controller capabilities with default values, then
++ * checks and, if the respective _DSM functions are available, loads the
++ * actual capabilities from the _DSM.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++static
++int ssam_controller_caps_load_from_acpi(acpi_handle handle,
++					struct ssam_controller_caps *caps)
++{
++	u32 d3_closes_handle = false;
++	u64 funcs;
++	int status;
++
++	/* Set defaults. */
++	caps->ssh_power_profile = U32_MAX;
++	caps->screen_on_sleep_idle_timeout = U32_MAX;
++	caps->screen_off_sleep_idle_timeout = U32_MAX;
++	caps->d3_closes_handle = false;
++	caps->ssh_buffer_size = U32_MAX;
++
++	/* Pre-load supported DSM functions. */
++	status = ssam_dsm_get_functions(handle, &funcs);
++	if (status)
++		return status;
++
++	/* Load actual values from ACPI, if present. */
++	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_POWER_PROFILE,
++				   &caps->ssh_power_profile);
++	if (status)
++		return status;
++
++	status = ssam_dsm_load_u32(handle, funcs,
++				   SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT,
++				   &caps->screen_on_sleep_idle_timeout);
++	if (status)
++		return status;
++
++	status = ssam_dsm_load_u32(handle, funcs,
++				   SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT,
++				   &caps->screen_off_sleep_idle_timeout);
++	if (status)
++		return status;
++
++	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_D3_CLOSES_HANDLE,
++				   &d3_closes_handle);
++	if (status)
++		return status;
++
++	caps->d3_closes_handle = !!d3_closes_handle;
++
++	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_BUFFER_SIZE,
++				   &caps->ssh_buffer_size);
++	if (status)
++		return status;
++
++	return 0;
++}
++
++/**
++ * ssam_controller_init() - Initialize SSAM controller.
++ * @ctrl:   The controller to initialize.
++ * @serdev: The serial device representing the underlying data transport.
++ *
++ * Initializes the given controller. Does neither start receiver nor
++ * transmitter threads. After this call, the controller has to be hooked up to
++ * the serdev core separately via &struct serdev_device_ops, relaying calls to
++ * ssam_controller_receive_buf() and ssam_controller_write_wakeup(). Once the
++ * controller has been hooked up, transmitter and receiver threads may be
++ * started via ssam_controller_start(). These setup steps need to be completed
++ * before the controller can be used for requests.
++ */
++int ssam_controller_init(struct ssam_controller *ctrl,
++			 struct serdev_device *serdev)
++{
++	acpi_handle handle = ACPI_HANDLE(&serdev->dev);
++	int status;
++
++	init_rwsem(&ctrl->lock);
++	kref_init(&ctrl->kref);
++
++	status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps);
++	if (status)
++		return status;
++
++	dev_dbg(&serdev->dev,
++		"device capabilities:\n"
++		"  ssh_power_profile:             %u\n"
++		"  ssh_buffer_size:               %u\n"
++		"  screen_on_sleep_idle_timeout:  %u\n"
++		"  screen_off_sleep_idle_timeout: %u\n"
++		"  d3_closes_handle:              %u\n",
++		ctrl->caps.ssh_power_profile,
++		ctrl->caps.ssh_buffer_size,
++		ctrl->caps.screen_on_sleep_idle_timeout,
++		ctrl->caps.screen_off_sleep_idle_timeout,
++		ctrl->caps.d3_closes_handle);
++
++	ssh_seq_reset(&ctrl->counter.seq);
++	ssh_rqid_reset(&ctrl->counter.rqid);
++
++	/* Initialize event/request completion system. */
++	status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
++	if (status)
++		return status;
++
++	/* Initialize request and packet transport layers. */
++	status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
++	if (status) {
++		ssam_cplt_destroy(&ctrl->cplt);
++		return status;
++	}
++
++	/*
++	 * Set state via write_once even though we expect to be in an
++	 * exclusive context, due to smoke-testing in
++	 * ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_INITIALIZED);
++	return 0;
++}
++
++/**
++ * ssam_controller_start() - Start the receiver and transmitter threads of the
++ * controller.
++ * @ctrl: The controller.
++ *
++ * Note: When this function is called, the controller should be properly
++ * hooked up to the serdev core via &struct serdev_device_ops. Please refer
++ * to ssam_controller_init() for more details on controller initialization.
++ *
++ * This function must be called with the main controller lock held (i.e. by
++ * calling ssam_controller_lock()).
++ */
++int ssam_controller_start(struct ssam_controller *ctrl)
++{
++	int status;
++
++	lockdep_assert_held_write(&ctrl->lock);
++
++	if (ctrl->state != SSAM_CONTROLLER_INITIALIZED)
++		return -EINVAL;
++
++	status = ssh_rtl_start(&ctrl->rtl);
++	if (status)
++		return status;
++
++	/*
++	 * Set state via write_once even though we expect to be locked/in an
++	 * exclusive context, due to smoke-testing in
++	 * ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
++	return 0;
++}
++
++/*
++ * SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT - Timeout for flushing requests during
++ * shutdown.
++ *
++ * Chosen to be larger than one full request timeout, including packets timing
++ * out. This value should give ample time to complete any outstanding requests
++ * during normal operation and account for the odd package timeout.
++ */
++#define SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT	msecs_to_jiffies(5000)
++
++/**
++ * ssam_controller_shutdown() - Shut down the controller.
++ * @ctrl: The controller.
++ *
++ * Shuts down the controller by flushing all pending requests and stopping the
++ * transmitter and receiver threads. All requests submitted after this call
++ * will fail with %-ESHUTDOWN. While it is discouraged to do so, this function
++ * is safe to use in parallel with ongoing request submission.
++ *
++ * In the course of this shutdown procedure, all currently registered
++ * notifiers will be unregistered. It is, however, strongly recommended to not
++ * rely on this behavior, and instead the party registering the notifier
++ * should unregister it before the controller gets shut down, e.g. via the
++ * SSAM bus which guarantees client devices to be removed before a shutdown.
++ *
++ * Note that events may still be pending after this call, but, due to the
++ * notifiers being unregistered, these events will be dropped when the
++ * controller is subsequently destroyed via ssam_controller_destroy().
++ *
++ * This function must be called with the main controller lock held (i.e. by
++ * calling ssam_controller_lock()).
++ */
++void ssam_controller_shutdown(struct ssam_controller *ctrl)
++{
++	enum ssam_controller_state s = ctrl->state;
++	int status;
++
++	lockdep_assert_held_write(&ctrl->lock);
++
++	if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
++		return;
++
++	/*
++	 * Try to flush pending events and requests while everything still
++	 * works. Note: There may still be packets and/or requests in the
++	 * system after this call (e.g. via control packets submitted by the
++	 * packet transport layer or flush timeout / failure, ...). Those will
++	 * be handled with the ssh_rtl_shutdown() call below.
++	 */
++	status = ssh_rtl_flush(&ctrl->rtl, SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT);
++	if (status) {
++		ssam_err(ctrl, "failed to flush request transport layer: %d\n",
++			 status);
++	}
++
++	/* Try to flush all currently completing requests and events. */
++	ssam_cplt_flush(&ctrl->cplt);
++
++	/*
++	 * We expect all notifiers to have been removed by the respective client
++	 * driver that set them up at this point. If this warning occurs, some
++	 * client driver has not done that...
++	 */
++	WARN_ON(!ssam_notifier_is_empty(ctrl));
++
++	/*
++	 * Nevertheless, we should still take care of drivers that don't behave
++	 * well. Thus disable all enabled events, unregister all notifiers.
++	 */
++	ssam_notifier_unregister_all(ctrl);
++
++	/*
++	 * Cancel remaining requests. Ensure no new ones can be queued and stop
++	 * threads.
++	 */
++	ssh_rtl_shutdown(&ctrl->rtl);
++
++	/*
++	 * Set state via write_once even though we expect to be locked/in an
++	 * exclusive context, due to smoke-testing in
++	 * ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STOPPED);
++	ctrl->rtl.ptl.serdev = NULL;
++}
++
++/**
++ * ssam_controller_destroy() - Destroy the controller and free its resources.
++ * @ctrl: The controller.
++ *
++ * Ensures that all resources associated with the controller get freed. This
++ * function should only be called after the controller has been stopped via
++ * ssam_controller_shutdown(). In general, this function should not be called
++ * directly. The only valid place to call this function directly is during
++ * initialization, before the controller has been fully initialized and passed
++ * to other processes. This function is called automatically when the
++ * reference count of the controller reaches zero.
++ *
++ * This function must be called with the main controller lock held (i.e. by
++ * calling ssam_controller_lock()).
++ */
++void ssam_controller_destroy(struct ssam_controller *ctrl)
++{
++	lockdep_assert_held_write(&ctrl->lock);
++
++	if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED)
++		return;
++
++	WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
++
++	/*
++	 * Note: New events could still have been received after the previous
++	 * flush in ssam_controller_shutdown, before the request transport layer
++	 * has been shut down. At this point, after the shutdown, we can be sure
++	 * that no new events will be queued. The call to ssam_cplt_destroy will
++	 * ensure that those remaining are being completed and freed.
++	 */
++
++	/* Actually free resources. */
++	ssam_cplt_destroy(&ctrl->cplt);
++	ssh_rtl_destroy(&ctrl->rtl);
++
++	/*
++	 * Set state via write_once even though we expect to be locked/in an
++	 * exclusive context, due to smoke-testing in
++	 * ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
++}
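++
++/*
++ * Lifecycle sketch (illustrative only, error handling omitted), following
++ * the locking rules documented for init/start/shutdown/destroy above. The
++ * final ssam_controller_put() drops the last reference, which in turn
++ * destroys the controller:
++ *
++ *    ssam_controller_init(ctrl, serdev);
++ *
++ *    ssam_controller_lock(ctrl);
++ *    ssam_controller_start(ctrl);
++ *    ssam_controller_unlock(ctrl);
++ *
++ *    ...
++ *
++ *    ssam_controller_lock(ctrl);
++ *    ssam_controller_shutdown(ctrl);
++ *    ssam_controller_unlock(ctrl);
++ *    ssam_controller_put(ctrl);
++ */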
++
++/**
++ * ssam_controller_suspend() - Suspend the controller.
++ * @ctrl: The controller to suspend.
++ *
++ * Marks the controller as suspended. Note that display-off and D0-exit
++ * notifications have to be sent manually before transitioning the controller
++ * into the suspended state via this function.
++ *
++ * See ssam_controller_resume() for the corresponding resume function.
++ *
++ * Return: Returns %-EINVAL if the controller is currently not in the
++ * "started" state.
++ */
++int ssam_controller_suspend(struct ssam_controller *ctrl)
++{
++	ssam_controller_lock(ctrl);
++
++	if (ctrl->state != SSAM_CONTROLLER_STARTED) {
++		ssam_controller_unlock(ctrl);
++		return -EINVAL;
++	}
++
++	ssam_dbg(ctrl, "pm: suspending controller\n");
++
++	/*
++	 * Set state via write_once even though we're locked, due to
++	 * smoke-testing in ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_SUSPENDED);
++
++	ssam_controller_unlock(ctrl);
++	return 0;
++}
++
++/**
++ * ssam_controller_resume() - Resume the controller from suspend.
++ * @ctrl: The controller to resume.
++ *
++ * Resume the controller from the suspended state it was put into via
++ * ssam_controller_suspend(). This function does not issue display-on and
++ * D0-entry notifications. If required, those have to be sent manually after
++ * this call.
++ *
++ * Return: Returns %-EINVAL if the controller is currently not suspended.
++ */
++int ssam_controller_resume(struct ssam_controller *ctrl)
++{
++	ssam_controller_lock(ctrl);
++
++	if (ctrl->state != SSAM_CONTROLLER_SUSPENDED) {
++		ssam_controller_unlock(ctrl);
++		return -EINVAL;
++	}
++
++	ssam_dbg(ctrl, "pm: resuming controller\n");
++
++	/*
++	 * Set state via write_once even though we're locked, due to
++	 * smoke-testing in ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
++
++	ssam_controller_unlock(ctrl);
++	return 0;
++}
++
++
++/* -- Top-level request interface ------------------------------------------- */
++
++/**
++ * ssam_request_write_data() - Construct and write SAM request message to
++ * buffer.
++ * @buf:  The buffer to write the data to.
++ * @ctrl: The controller via which the request will be sent.
++ * @spec: The request data and specification.
++ *
++ * Constructs a SAM/SSH request message and writes it to the provided buffer.
++ * The request and transport counters, specifically RQID and SEQ, will be set
++ * in this call. These counters are obtained from the controller. It is thus
++ * only valid to send the resulting message via the controller specified here.
++ *
++ * For calculation of the required buffer size, refer to the
++ * SSH_COMMAND_MESSAGE_LENGTH() macro.
++ *
++ * Return: Returns the number of bytes used in the buffer on success. Returns
++ * %-EINVAL if the payload length provided in the request specification is too
++ * large (larger than %SSH_COMMAND_MAX_PAYLOAD_SIZE) or if the provided buffer
++ * is too small.
++ */
++ssize_t ssam_request_write_data(struct ssam_span *buf,
++				struct ssam_controller *ctrl,
++				const struct ssam_request *spec)
++{
++	struct msgbuf msgb;
++	u16 rqid;
++	u8 seq;
++
++	if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
++		return -EINVAL;
++
++	if (SSH_COMMAND_MESSAGE_LENGTH(spec->length) > buf->len)
++		return -EINVAL;
++
++	msgb_init(&msgb, buf->ptr, buf->len);
++	seq = ssh_seq_next(&ctrl->counter.seq);
++	rqid = ssh_rqid_next(&ctrl->counter.rqid);
++	msgb_push_cmd(&msgb, seq, rqid, spec);
++
++	return msgb_bytes_used(&msgb);
++}
++EXPORT_SYMBOL_GPL(ssam_request_write_data);
++
++static void ssam_request_sync_complete(struct ssh_request *rqst,
++				       const struct ssh_command *cmd,
++				       const struct ssam_span *data, int status)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++	struct ssam_request_sync *r;
++
++	r = container_of(rqst, struct ssam_request_sync, base);
++	r->status = status;
++
++	if (r->resp)
++		r->resp->length = 0;
++
++	if (status) {
++		rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
++		return;
++	}
++
++	if (!data)	/* Handle requests without a response. */
++		return;
++
++	if (!r->resp || !r->resp->pointer) {
++		if (data->len)
++			rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n");
++		return;
++	}
++
++	if (data->len > r->resp->capacity) {
++		rtl_err(rtl,
++			"rsp: response buffer too small, capacity: %zu bytes, got: %zu bytes\n",
++			r->resp->capacity, data->len);
++		r->status = -ENOSPC;
++		return;
++	}
++
++	r->resp->length = data->len;
++	memcpy(r->resp->pointer, data->ptr, data->len);
++}
++
++static void ssam_request_sync_release(struct ssh_request *rqst)
++{
++	complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
++}
++
++static const struct ssh_request_ops ssam_request_sync_ops = {
++	.release = ssam_request_sync_release,
++	.complete = ssam_request_sync_complete,
++};
++
++/**
++ * ssam_request_sync_alloc() - Allocate a synchronous request.
++ * @payload_len: The length of the request payload.
++ * @flags:       Flags used for allocation.
++ * @rqst:        Where to store the pointer to the allocated request.
++ * @buffer:      Where to store the buffer descriptor for the message buffer of
++ *               the request.
++ *
++ * Allocates a synchronous request with corresponding message buffer. The
++ * request still needs to be initialized via ssam_request_sync_init() before
++ * it can be submitted, and the message buffer data must still be set to the
++ * returned buffer via ssam_request_sync_set_data() after it has been filled,
++ * if need be with adjusted message length.
++ *
++ * After use, the request and its corresponding message buffer should be freed
++ * via ssam_request_sync_free(). The buffer must not be freed separately.
++ *
++ * Return: Returns zero on success, %-ENOMEM if the request could not be
++ * allocated.
++ */
++int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
++			    struct ssam_request_sync **rqst,
++			    struct ssam_span *buffer)
++{
++	size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
++
++	*rqst = kzalloc(sizeof(**rqst) + msglen, flags);
++	if (!*rqst)
++		return -ENOMEM;
++
++	buffer->ptr = (u8 *)(*rqst + 1);
++	buffer->len = msglen;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
++
++/**
++ * ssam_request_sync_free() - Free a synchronous request.
++ * @rqst: The request to be freed.
++ *
++ * Free a synchronous request and its corresponding buffer allocated with
++ * ssam_request_sync_alloc(). Do not use for requests allocated on the stack
++ * or via any other function.
++ *
++ * Warning: The caller must ensure that the request is not in use any more.
++ * I.e. the caller must ensure that it has the only reference to the request
++ * and the request is not currently pending. This means that the caller has
++ * either never submitted the request, request submission has failed, or the
++ * caller has waited until the submitted request has been completed via
++ * ssam_request_sync_wait().
++ */
++void ssam_request_sync_free(struct ssam_request_sync *rqst)
++{
++	kfree(rqst);
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_free);
++
++/**
++ * ssam_request_sync_init() - Initialize a synchronous request struct.
++ * @rqst:  The request to initialize.
++ * @flags: The request flags.
++ *
++ * Initializes the given request struct. Does not initialize the request
++ * message data. This has to be done explicitly after this call via
++ * ssam_request_sync_set_data() and the actual message data has to be written
++ * via ssam_request_write_data().
++ *
++ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
++ */
++int ssam_request_sync_init(struct ssam_request_sync *rqst,
++			   enum ssam_request_flags flags)
++{
++	int status;
++
++	status = ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
++	if (status)
++		return status;
++
++	init_completion(&rqst->comp);
++	rqst->resp = NULL;
++	rqst->status = 0;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_init);
++
++/**
++ * ssam_request_sync_submit() - Submit a synchronous request.
++ * @ctrl: The controller with which to submit the request.
++ * @rqst: The request to submit.
++ *
++ * Submit a synchronous request. The request has to be initialized and
++ * properly set up, including response buffer (may be %NULL if no response is
++ * expected) and command message data. This function does not wait for the
++ * request to be completed.
++ *
++ * If this function succeeds, ssam_request_sync_wait() must be used to ensure
++ * that the request has been completed before the response data can be
++ * accessed and/or the request can be freed. On failure, the request may
++ * immediately be freed.
++ *
++ * This function may only be used if the controller is active, i.e. has been
++ * initialized and not suspended.
++ */
++int ssam_request_sync_submit(struct ssam_controller *ctrl,
++			     struct ssam_request_sync *rqst)
++{
++	int status;
++
++	/*
++	 * This is only a superficial check. In general, the caller needs to
++	 * ensure that the controller is initialized and is not (and does not
++	 * get) suspended during use, i.e. until the request has been completed
++	 * (if _absolutely_ necessary, by use of ssam_controller_statelock/
++	 * ssam_controller_stateunlock, but something like ssam_client_link
++	 * should be preferred as this needs to last until the request has been
++	 * completed).
++	 *
++	 * Note that it is actually safe to use this function while the
++	 * controller is in the process of being shut down (as ssh_rtl_submit
++	 * is safe with regards to this), but it is generally discouraged to do
++	 * so.
++	 */
++	if (WARN_ON(READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED)) {
++		ssh_request_put(&rqst->base);
++		return -ENODEV;
++	}
++
++	status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
++	ssh_request_put(&rqst->base);
++
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
++
++/**
++ * ssam_request_sync() - Execute a synchronous request.
++ * @ctrl: The controller via which the request will be submitted.
++ * @spec: The request specification and payload.
++ * @rsp:  The response buffer.
++ *
++ * Allocates a synchronous request with its message data buffer on the heap
++ * via ssam_request_sync_alloc(), fully initializes it via the provided
++ * request specification, submits it, and finally waits for its completion
++ * before freeing it and returning its status.
++ *
++ * Return: Returns the status of the request or any failure during setup.
++ */
++int ssam_request_sync(struct ssam_controller *ctrl,
++		      const struct ssam_request *spec,
++		      struct ssam_response *rsp)
++{
++	struct ssam_request_sync *rqst;
++	struct ssam_span buf;
++	ssize_t len;
++	int status;
++
++	status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
++	if (status)
++		return status;
++
++	status = ssam_request_sync_init(rqst, spec->flags);
++	if (status) {
++		ssam_request_sync_free(rqst);
++		return status;
++	}
++
++	ssam_request_sync_set_resp(rqst, rsp);
++
++	len = ssam_request_write_data(&buf, ctrl, spec);
++	if (len < 0) {
++		ssam_request_sync_free(rqst);
++		return len;
++	}
++
++	ssam_request_sync_set_data(rqst, buf.ptr, len);
++
++	status = ssam_request_sync_submit(ctrl, rqst);
++	if (!status)
++		status = ssam_request_sync_wait(rqst);
++
++	ssam_request_sync_free(rqst);
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync);
++
++/**
++ * ssam_request_sync_with_buffer() - Execute a synchronous request with the
++ * provided buffer as back-end for the message buffer.
++ * @ctrl: The controller via which the request will be submitted.
++ * @spec: The request specification and payload.
++ * @rsp:  The response buffer.
++ * @buf:  The buffer for the request message data.
++ *
++ * Allocates a synchronous request struct on the stack, fully initializes it
++ * using the provided buffer as message data buffer, submits it, and then
++ * waits for its completion before returning its status. The
++ * SSH_COMMAND_MESSAGE_LENGTH() macro can be used to compute the required
++ * message buffer size.
++ *
++ * This function does essentially the same as ssam_request_sync(), but instead
++ * of dynamically allocating the request and message data buffer, it uses the
++ * provided message data buffer and stores the (small) request struct on the
++ * stack.
++ *
++ * Return: Returns the status of the request or any failure during setup.
++ */
++int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
++				  const struct ssam_request *spec,
++				  struct ssam_response *rsp,
++				  struct ssam_span *buf)
++{
++	struct ssam_request_sync rqst;
++	ssize_t len;
++	int status;
++
++	status = ssam_request_sync_init(&rqst, spec->flags);
++	if (status)
++		return status;
++
++	ssam_request_sync_set_resp(&rqst, rsp);
++
++	len = ssam_request_write_data(buf, ctrl, spec);
++	if (len < 0)
++		return len;
++
++	ssam_request_sync_set_data(&rqst, buf->ptr, len);
++
++	status = ssam_request_sync_submit(ctrl, &rqst);
++	if (!status)
++		status = ssam_request_sync_wait(&rqst);
++
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
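++
++/*
++ * Usage sketch (hypothetical client code; the target/command IDs below are
++ * placeholders chosen purely for illustration):
++ *
++ *    u8 data[1] = { 0x01 };
++ *    u8 msgbuf[SSH_COMMAND_MESSAGE_LENGTH(sizeof(data))];
++ *    struct ssam_span buf = { .ptr = msgbuf, .len = sizeof(msgbuf) };
++ *    struct ssam_request rqst = {
++ *            .target_category = SSAM_SSH_TC_SAM,
++ *            .target_id       = 0x01,
++ *            .command_id      = 0x0b,
++ *            .instance_id     = 0x00,
++ *            .flags           = 0,
++ *            .length          = sizeof(data),
++ *            .payload         = data,
++ *    };
++ *
++ *    status = ssam_request_sync_with_buffer(ctrl, &rqst, NULL, &buf);
++ *
++ * Passing a NULL response is fine for requests without a response; for
++ * requests with one, set %SSAM_REQUEST_HAS_RESPONSE and provide a
++ * &struct ssam_response with an appropriately sized buffer.
++ */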
++
++
++/* -- Internal SAM requests. ------------------------------------------------ */
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x13,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x15,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x16,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x33,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x34,
++	.instance_id     = 0x00,
++});
++
++/**
++ * struct ssh_notification_params - Command payload to enable/disable SSH
++ * notifications.
++ * @target_category: The target category for which notifications should be
++ *                   enabled/disabled.
++ * @flags:           Flags determining how notifications are being sent.
++ * @request_id:      The request ID that is used to send these notifications.
++ * @instance_id:     The specific instance in the given target category for
++ *                   which notifications should be enabled.
++ */
++struct ssh_notification_params {
++	u8 target_category;
++	u8 flags;
++	__le16 request_id;
++	u8 instance_id;
++} __packed;
++
++static_assert(sizeof(struct ssh_notification_params) == 5);
++
++static int __ssam_ssh_event_request(struct ssam_controller *ctrl,
++				    struct ssam_event_registry reg, u8 cid,
++				    struct ssam_event_id id, u8 flags)
++{
++	struct ssh_notification_params params;
++	struct ssam_request rqst;
++	struct ssam_response result;
++	int status;
++
++	u16 rqid = ssh_tc_to_rqid(id.target_category);
++	u8 buf = 0;
++
++	/* Only allow RQIDs that lie within the event spectrum. */
++	if (!ssh_rqid_is_event(rqid))
++		return -EINVAL;
++
++	params.target_category = id.target_category;
++	params.instance_id = id.instance;
++	params.flags = flags;
++	put_unaligned_le16(rqid, &params.request_id);
++
++	rqst.target_category = reg.target_category;
++	rqst.target_id = reg.target_id;
++	rqst.command_id = cid;
++	rqst.instance_id = 0x00;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(params);
++	rqst.payload = (u8 *)&params;
++
++	result.capacity = sizeof(buf);
++	result.length = 0;
++	result.pointer = &buf;
++
++	status = ssam_retry(ssam_request_sync_onstack, ctrl, &rqst, &result,
++			    sizeof(params));
++
++	return status < 0 ? status : buf;
++}
++
++/**
++ * ssam_ssh_event_enable() - Enable SSH event.
++ * @ctrl:  The controller for which to enable the event.
++ * @reg:   The event registry describing what request to use for enabling and
++ *         disabling the event.
++ * @id:    The event identifier.
++ * @flags: The event flags.
++ *
++ * Enables the specified event on the EC. This function does not manage
++ * reference counting of enabled events and is basically only a wrapper for
++ * the raw EC request. If the specified event is already enabled, the EC will
++ * ignore this request.
++ *
++ * Return: Returns the status of the executed SAM request (zero on success and
++ * negative on direct failure) or %-EPROTO if the request response indicates a
++ * failure.
++ */
++static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
++				 struct ssam_event_registry reg,
++				 struct ssam_event_id id, u8 flags)
++{
++	int status;
++
++	status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
++
++	if (status < 0 && status != -EINVAL) {
++		ssam_err(ctrl,
++			 "failed to enable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
++			 id.target_category, id.instance, reg.target_category);
++	}
++
++	if (status > 0) {
++		ssam_err(ctrl,
++			 "unexpected result while enabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
++			 status, id.target_category, id.instance, reg.target_category);
++		return -EPROTO;
++	}
++
++	return status;
++}
++
++/**
++ * ssam_ssh_event_disable() - Disable SSH event.
++ * @ctrl:  The controller for which to disable the event.
++ * @reg:   The event registry describing what request to use for enabling and
++ *         disabling the event (must be same as used when enabling the event).
++ * @id:    The event identifier.
++ * @flags: The event flags (likely ignored for disabling of events).
++ *
++ * Disables the specified event on the EC. This function does not manage
++ * reference counting of enabled events and is basically only a wrapper for
++ * the raw EC request. If the specified event is already disabled, the EC will
++ * ignore this request.
++ *
++ * Return: Returns the status of the executed SAM request (zero on success and
++ * negative on direct failure) or %-EPROTO if the request response indicates a
++ * failure.
++ */
++static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
++				  struct ssam_event_registry reg,
++				  struct ssam_event_id id, u8 flags)
++{
++	int status;
++
++	status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
++
++	if (status < 0 && status != -EINVAL) {
++		ssam_err(ctrl,
++			 "failed to disable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
++			 id.target_category, id.instance, reg.target_category);
++	}
++
++	if (status > 0) {
++		ssam_err(ctrl,
++			 "unexpected result while disabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
++			 status, id.target_category, id.instance, reg.target_category);
++		return -EPROTO;
++	}
++
++	return status;
++}
++
++
++/* -- Wrappers for internal SAM requests. ----------------------------------- */
++
++/**
++ * ssam_get_firmware_version() - Get the SAM/EC firmware version.
++ * @ctrl:    The controller.
++ * @version: Where to store the version number.
++ *
++ * Return: Returns zero on success or the status of the executed SAM request
++ * if that request failed.
++ */
++int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version)
++{
++	__le32 __version;
++	int status;
++
++	status = ssam_retry(ssam_ssh_get_firmware_version, ctrl, &__version);
++	if (status)
++		return status;
++
++	*version = le32_to_cpu(__version);
++	return 0;
++}
++
++/**
++ * ssam_ctrl_notif_display_off() - Notify EC that the display has been turned
++ * off.
++ * @ctrl: The controller.
++ *
++ * Notify the EC that the display has been turned off and the driver may enter
++ * a lower-power state. This will prevent events from being sent directly.
++ * Rather, the EC signals an event by pulling the wakeup GPIO high for as long
++ * as there are pending events. The events then need to be manually released,
++ * one by one, via the GPIO callback request. All pending events accumulated
++ * during this state can also be released by issuing the display-on
++ * notification, e.g. via ssam_ctrl_notif_display_on(), which will also reset
++ * the GPIO.
++ *
++ * On some devices, specifically ones with an integrated keyboard, the keyboard
++ * backlight will be turned off by this call.
++ *
++ * This function will only send the display-off notification command if
++ * display notifications are supported by the EC. Currently all known devices
++ * support these notifications.
++ *
++ * Use ssam_ctrl_notif_display_on() to reverse the effects of this function.
++ *
++ * Return: Returns zero on success or if no request has been executed, the
++ * status of the executed SAM request if that request failed, or %-EPROTO if
++ * an unexpected response has been received.
++ */
++int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
++{
++	int status;
++	u8 response;
++
++	ssam_dbg(ctrl, "pm: notifying display off\n");
++
++	status = ssam_retry(ssam_ssh_notif_display_off, ctrl, &response);
++	if (status)
++		return status;
++
++	if (response != 0) {
++		ssam_err(ctrl, "unexpected response from display-off notification: %#04x\n",
++			 response);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++/**
++ * ssam_ctrl_notif_display_on() - Notify EC that the display has been turned on.
++ * @ctrl: The controller.
++ *
++ * Notify the EC that the display has been turned back on and the driver has
++ * exited its lower-power state. This notification is the counterpart to the
++ * display-off notification sent via ssam_ctrl_notif_display_off() and will
++ * reverse its effects, including resetting events to their default behavior.
++ *
++ * This function will only send the display-on notification command if display
++ * notifications are supported by the EC. Currently all known devices support
++ * these notifications.
++ *
++ * See ssam_ctrl_notif_display_off() for more details.
++ *
++ * Return: Returns zero on success or if no request has been executed, the
++ * status of the executed SAM request if that request failed, or %-EPROTO if
++ * an unexpected response has been received.
++ */
++int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
++{
++	int status;
++	u8 response;
++
++	ssam_dbg(ctrl, "pm: notifying display on\n");
++
++	status = ssam_retry(ssam_ssh_notif_display_on, ctrl, &response);
++	if (status)
++		return status;
++
++	if (response != 0) {
++		ssam_err(ctrl, "unexpected response from display-on notification: %#04x\n",
++			 response);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++/**
++ * ssam_ctrl_notif_d0_exit() - Notify EC that the driver/device exits the D0
++ * power state.
++ * @ctrl: The controller
++ *
++ * Notifies the EC that the driver prepares to exit the D0 power state in
++ * favor of a lower-power state. Exact effects of this function related to the
++ * EC are currently unknown.
++ *
++ * This function will only send the D0-exit notification command if D0-state
++ * notifications are supported by the EC. Only newer Surface generations
++ * support these notifications.
++ *
++ * Use ssam_ctrl_notif_d0_entry() to reverse the effects of this function.
++ *
++ * Return: Returns zero on success or if no request has been executed, the
++ * status of the executed SAM request if that request failed, or %-EPROTO if
++ * an unexpected response has been received.
++ */
++int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
++{
++	int status;
++	u8 response;
++
++	if (!ctrl->caps.d3_closes_handle)
++		return 0;
++
++	ssam_dbg(ctrl, "pm: notifying D0 exit\n");
++
++	status = ssam_retry(ssam_ssh_notif_d0_exit, ctrl, &response);
++	if (status)
++		return status;
++
++	if (response != 0) {
++		ssam_err(ctrl, "unexpected response from D0-exit notification: %#04x\n",
++			 response);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++/**
++ * ssam_ctrl_notif_d0_entry() - Notify EC that the driver/device enters the D0
++ * power state.
++ * @ctrl: The controller
++ *
++ * Notifies the EC that the driver has exited a lower-power state and entered
++ * the D0 power state. Exact effects of this function related to the EC are
++ * currently unknown.
++ *
++ * This function will only send the D0-entry notification command if D0-state
++ * notifications are supported by the EC. Only newer Surface generations
++ * support these notifications.
++ *
++ * See ssam_ctrl_notif_d0_exit() for more details.
++ *
++ * Return: Returns zero on success or if no request has been executed, the
++ * status of the executed SAM request if that request failed, or %-EPROTO if
++ * an unexpected response has been received.
++ */
++int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
++{
++	int status;
++	u8 response;
++
++	if (!ctrl->caps.d3_closes_handle)
++		return 0;
++
++	ssam_dbg(ctrl, "pm: notifying D0 entry\n");
++
++	status = ssam_retry(ssam_ssh_notif_d0_entry, ctrl, &response);
++	if (status)
++		return status;
++
++	if (response != 0) {
++		ssam_err(ctrl, "unexpected response from D0-entry notification: %#04x\n",
++			 response);
++		return -EPROTO;
++	}
++
++	return 0;
++}
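++
++/*
++ * Ordering sketch (illustrative): as noted in ssam_controller_suspend() and
++ * ssam_controller_resume(), the display and D0 notifications are not sent by
++ * those functions and have to be issued around them, roughly as follows
++ * (error handling omitted):
++ *
++ *    suspend:
++ *            ssam_ctrl_notif_display_off(ctrl);
++ *            ssam_ctrl_notif_d0_exit(ctrl);
++ *            ssam_controller_suspend(ctrl);
++ *
++ *    resume:
++ *            ssam_controller_resume(ctrl);
++ *            ssam_ctrl_notif_d0_entry(ctrl);
++ *            ssam_ctrl_notif_display_on(ctrl);
++ */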
++
++
++/* -- Top-level event registry interface. ----------------------------------- */
++
++/**
++ * ssam_notifier_register() - Register an event notifier.
++ * @ctrl: The controller to register the notifier on.
++ * @n:    The event notifier to register.
++ *
++ * Register an event notifier and increment the usage counter of the
++ * associated SAM event. If the event was previously not enabled, it will be
++ * enabled during this call.
++ *
++ * Return: Returns zero on success, %-ENOSPC if %INT_MAX notifiers have
++ * already been registered for the event ID/type associated with the notifier
++ * block, or %-ENOMEM if the corresponding event entry could not be
++ * allocated. If this is the first time that a notifier block is registered
++ * for the specific associated event, returns the status of the event-enable
++ * EC-command.
++ */
++int ssam_notifier_register(struct ssam_controller *ctrl,
++			   struct ssam_event_notifier *n)
++{
++	u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
++	struct ssam_nf_refcount_entry *entry;
++	struct ssam_nf_head *nf_head;
++	struct ssam_nf *nf;
++	int status;
++
++	if (!ssh_rqid_is_event(rqid))
++		return -EINVAL;
++
++	nf = &ctrl->cplt.event.notif;
++	nf_head = &nf->head[ssh_rqid_to_event(rqid)];
++
++	mutex_lock(&nf->lock);
++
++	entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
++	if (IS_ERR(entry)) {
++		mutex_unlock(&nf->lock);
++		return PTR_ERR(entry);
++	}
++
++	ssam_dbg(ctrl, "enabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
++		 n->event.reg.target_category, n->event.id.target_category,
++		 n->event.id.instance, entry->refcount);
++
++	status = ssam_nfblk_insert(nf_head, &n->base);
++	if (status) {
++		entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
++		if (entry->refcount == 0)
++			kfree(entry);
++
++		mutex_unlock(&nf->lock);
++		return status;
++	}
++
++	if (entry->refcount == 1) {
++		status = ssam_ssh_event_enable(ctrl, n->event.reg, n->event.id,
++					       n->event.flags);
++		if (status) {
++			ssam_nfblk_remove(&n->base);
++			kfree(ssam_nf_refcount_dec(nf, n->event.reg, n->event.id));
++			mutex_unlock(&nf->lock);
++			synchronize_srcu(&nf_head->srcu);
++			return status;
++		}
++
++		entry->flags = n->event.flags;
++
++	} else if (entry->flags != n->event.flags) {
++		ssam_warn(ctrl,
++			  "inconsistent flags when enabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
++			  n->event.flags, entry->flags, n->event.reg.target_category,
++			  n->event.id.target_category, n->event.id.instance);
++	}
++
++	mutex_unlock(&nf->lock);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ssam_notifier_register);
++
++/**
++ * ssam_notifier_unregister() - Unregister an event notifier.
++ * @ctrl: The controller the notifier has been registered on.
++ * @n:    The event notifier to unregister.
++ *
++ * Unregister an event notifier and decrement the usage counter of the
++ * associated SAM event. If the usage counter reaches zero, the event will be
++ * disabled.
++ *
++ * Return: Returns zero on success, %-ENOENT if the given notifier block has
++ * not been registered on the controller. If the given notifier block was the
++ * last one associated with its specific event, returns the status of the
++ * event-disable EC-command.
++ */
++int ssam_notifier_unregister(struct ssam_controller *ctrl,
++			     struct ssam_event_notifier *n)
++{
++	u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
++	struct ssam_nf_refcount_entry *entry;
++	struct ssam_nf_head *nf_head;
++	struct ssam_nf *nf;
++	int status = 0;
++
++	if (!ssh_rqid_is_event(rqid))
++		return -EINVAL;
++
++	nf = &ctrl->cplt.event.notif;
++	nf_head = &nf->head[ssh_rqid_to_event(rqid)];
++
++	mutex_lock(&nf->lock);
++
++	if (!ssam_nfblk_find(nf_head, &n->base)) {
++		mutex_unlock(&nf->lock);
++		return -ENOENT;
++	}
++
++	entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
++	if (WARN_ON(!entry)) {
++		/*
++		 * If this does not return an entry, there's a logic error
++		 * somewhere: The notifier block is registered, but the event
++		 * refcount entry is not there. Remove the notifier block
++		 * anyways.
++		 */
++		status = -ENOENT;
++		goto remove;
++	}
++
++	ssam_dbg(ctrl, "disabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
++		 n->event.reg.target_category, n->event.id.target_category,
++		 n->event.id.instance, entry->refcount);
++
++	if (entry->flags != n->event.flags) {
++		ssam_warn(ctrl,
++			  "inconsistent flags when disabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
++			  n->event.flags, entry->flags, n->event.reg.target_category,
++			  n->event.id.target_category, n->event.id.instance);
++	}
++
++	if (entry->refcount == 0) {
++		status = ssam_ssh_event_disable(ctrl, n->event.reg, n->event.id,
++						n->event.flags);
++		kfree(entry);
++	}
++
++remove:
++	ssam_nfblk_remove(&n->base);
++	mutex_unlock(&nf->lock);
++	synchronize_srcu(&nf_head->srcu);
++
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
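As a usage sketch, a client fills in a &struct ssam_event_notifier and pairs the two calls above. The sketch below is illustrative only: the callback signature and the SSAM_EVENT_REGISTRY_SAM, SSAM_SSH_TC_BAT, SSAM_EVENT_SEQUENCED, and SSAM_NOTIF_HANDLED symbols are assumed to come from the public surface_aggregator headers introduced elsewhere in this series; only the .base and .event fields are visible in this file.

```c
/*
 * Hypothetical client sketch (not part of this patch): register a notifier
 * for battery events and unregister it on teardown. The callback signature
 * and the SSAM_* constants below are assumed from the public headers added
 * elsewhere in this series.
 */
static u32 example_notify(struct ssam_event_notifier *nf,
			  const struct ssam_event *event)
{
	/* Inspect event->data / event->length here. */
	return SSAM_NOTIF_HANDLED;
}

static struct ssam_event_notifier example_notifier = {
	.base.fn = example_notify,
	.event.reg = SSAM_EVENT_REGISTRY_SAM,
	.event.id.target_category = SSAM_SSH_TC_BAT,	/* Battery subsystem (assumed constant). */
	.event.id.instance = 0x01,
	.event.flags = SSAM_EVENT_SEQUENCED,
};

static int example_setup(struct ssam_controller *ctrl)
{
	/* Enables the event on first registration, as described above. */
	return ssam_notifier_register(ctrl, &example_notifier);
}

static void example_teardown(struct ssam_controller *ctrl)
{
	/* Disables the event again once the last notifier is removed. */
	ssam_notifier_unregister(ctrl, &example_notifier);
}
```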
++
++/**
++ * ssam_notifier_disable_registered() - Disable events for all registered
++ * notifiers.
++ * @ctrl: The controller for which to disable the notifiers/events.
++ *
++ * Disables events for all currently registered notifiers. In case of an error
++ * (EC command failing), all previously disabled events will be restored and
++ * the error code returned.
++ *
++ * This function is intended to disable all events prior to hibernation entry.
++ * See ssam_notifier_restore_registered() to restore/re-enable all events
++ * disabled with this function.
++ *
++ * Note that this function will not disable events for notifiers registered
++ * after calling this function. Callers must therefore ensure that no new
++ * notifiers are added between this call and the corresponding call to
++ * ssam_notifier_restore_registered().
++ *
++ * Return: Returns zero on success. In case of failure returns the error code
++ * returned by the failed EC command to disable an event.
++ */
++int ssam_notifier_disable_registered(struct ssam_controller *ctrl)
++{
++	struct ssam_nf *nf = &ctrl->cplt.event.notif;
++	struct rb_node *n;
++	int status;
++
++	mutex_lock(&nf->lock);
++	for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
++		struct ssam_nf_refcount_entry *e;
++
++		e = rb_entry(n, struct ssam_nf_refcount_entry, node);
++		status = ssam_ssh_event_disable(ctrl, e->key.reg,
++						e->key.id, e->flags);
++		if (status)
++			goto err;
++	}
++	mutex_unlock(&nf->lock);
++
++	return 0;
++
++err:
++	for (n = rb_prev(n); n; n = rb_prev(n)) {
++		struct ssam_nf_refcount_entry *e;
++
++		e = rb_entry(n, struct ssam_nf_refcount_entry, node);
++		ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
++	}
++	mutex_unlock(&nf->lock);
++
++	return status;
++}
++
++/**
++ * ssam_notifier_restore_registered() - Restore/re-enable events for all
++ * registered notifiers.
++ * @ctrl: The controller for which to restore the notifiers/events.
++ *
++ * Restores/re-enables all events for which notifiers have been registered on
++ * the given controller. In case of a failure, the error is logged and the
++ * function continues to try and enable the remaining events.
++ *
++ * This function is intended to restore/re-enable all registered events after
++ * hibernation. See ssam_notifier_disable_registered() for the counterpart
++ * disabling the events and more details.
++ */
++void ssam_notifier_restore_registered(struct ssam_controller *ctrl)
++{
++	struct ssam_nf *nf = &ctrl->cplt.event.notif;
++	struct rb_node *n;
++
++	mutex_lock(&nf->lock);
++	for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
++		struct ssam_nf_refcount_entry *e;
++
++		e = rb_entry(n, struct ssam_nf_refcount_entry, node);
++
++		/* Ignore errors, will get logged in call. */
++		ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
++	}
++	mutex_unlock(&nf->lock);
++}
++
++/**
++ * ssam_notifier_is_empty() - Check if there are any registered notifiers.
++ * @ctrl: The controller to check on.
++ *
++ * Return: Returns %true if there are currently no notifiers registered on the
++ * controller, %false otherwise.
++ */
++static bool ssam_notifier_is_empty(struct ssam_controller *ctrl)
++{
++	struct ssam_nf *nf = &ctrl->cplt.event.notif;
++	bool result;
++
++	mutex_lock(&nf->lock);
++	result = ssam_nf_refcount_empty(nf);
++	mutex_unlock(&nf->lock);
++
++	return result;
++}
++
++/**
++ * ssam_notifier_unregister_all() - Unregister all currently registered
++ * notifiers.
++ * @ctrl: The controller to unregister the notifiers on.
++ *
++ * Unregisters all currently registered notifiers. This function is used to
++ * ensure that all notifiers will be unregistered and associated
++ * entries/resources freed when the controller is being shut down.
++ */
++static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
++{
++	struct ssam_nf *nf = &ctrl->cplt.event.notif;
++	struct ssam_nf_refcount_entry *e, *n;
++
++	mutex_lock(&nf->lock);
++	rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
++		/* Ignore errors, will get logged in call. */
++		ssam_ssh_event_disable(ctrl, e->key.reg, e->key.id, e->flags);
++		kfree(e);
++	}
++	nf->refcount = RB_ROOT;
++	mutex_unlock(&nf->lock);
++}
++
++
++/* -- Wakeup IRQ. ----------------------------------------------------------- */
++
++static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
++{
++	struct ssam_controller *ctrl = dev_id;
++
++	ssam_dbg(ctrl, "pm: wake irq triggered\n");
++
++	/*
++	 * Note: Proper wakeup detection is currently unimplemented.
++	 *       When the EC is in display-off or any other non-D0 state, it
++	 *       does not send events/notifications to the host. Instead it
++	 *       signals that there are events available via the wakeup IRQ.
++	 *       This driver is responsible for calling back to the EC to
++	 *       release these events one-by-one.
++	 *
++	 *       This IRQ should not cause a full system resume on its own.
++	 *       Instead, events should be handled by their respective subsystem
++	 *       drivers, which in turn should signal whether a full system
++	 *       resume should be performed.
++	 *
++	 * TODO: Send GPIO callback command repeatedly to EC until callback
++	 *       returns 0x00. Return flag of callback is "has more events".
++	 *       Each time the command is sent, one event is "released". Once
++	 *       all events have been released (return = 0x00), the GPIO is
++	 *       re-armed. Detect wakeup events during this process, go back to
++	 *       sleep if no wakeup event has been received.
++	 */
++
++	return IRQ_HANDLED;
++}
++
++/**
++ * ssam_irq_setup() - Set up SAM EC wakeup-GPIO interrupt.
++ * @ctrl: The controller for which the IRQ should be set up.
++ *
++ * Set up an IRQ for the wakeup-GPIO pin of the SAM EC. This IRQ can be used
++ * to wake the device from a low power state.
++ *
++ * Note that this IRQ can only be triggered while the EC is in the display-off
++ * state. In this state, events are not sent to the host in the usual way.
++ * Instead the wakeup-GPIO gets pulled to "high" as long as there are pending
++ * events and these events need to be released one-by-one via the GPIO
++ * callback request, either until there are no events left and the GPIO is
++ * reset, or all at once by transitioning the EC out of the display-off state,
++ * which will also clear the GPIO.
++ *
++ * Not all events, however, should trigger a full system wakeup. Instead the
++ * driver should, if necessary, inspect and forward each event to the
++ * corresponding subsystem, which in turn should decide if the system needs to
++ * be woken up. This logic has not been implemented yet, thus wakeup by this
++ * IRQ should be disabled by default to avoid spurious wake-ups, caused, for
++ * example, by the remaining battery percentage changing. Refer to comments in
++ * this function and comments in the corresponding IRQ handler for more
++ * details on how this should be implemented.
++ *
++ * See also ssam_ctrl_notif_display_off() and ssam_ctrl_notif_display_on()
++ * for functions to transition the EC into and out of the display-off state as
++ * well as more details on it.
++ *
++ * The IRQ is disabled by default and has to be enabled before it can wake up
++ * the device from suspend via ssam_irq_arm_for_wakeup(). On teardown, the IRQ
++ * should be freed via ssam_irq_free().
++ */
++int ssam_irq_setup(struct ssam_controller *ctrl)
++{
++	struct device *dev = ssam_controller_device(ctrl);
++	struct gpio_desc *gpiod;
++	int irq;
++	int status;
++
++	/*
++	 * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
++	 * However, the GPIO line only gets reset by sending the GPIO callback
++	 * command to SAM (or alternatively the display-on notification). As
++	 * proper handling for this interrupt is not implemented yet, leaving
++	 * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
++	 * never gets sent and thus the line never gets reset). To avoid this,
++	 * mark the IRQ as TRIGGER_RISING for now, only creating a single
++	 * interrupt, and let the SAM resume callback during the controller
++	 * resume process clear it.
++	 */
++	const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
++
++	gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
++	if (IS_ERR(gpiod))
++		return PTR_ERR(gpiod);
++
++	irq = gpiod_to_irq(gpiod);
++	gpiod_put(gpiod);
++
++	if (irq < 0)
++		return irq;
++
++	status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
++				      "ssam_wakeup", ctrl);
++	if (status)
++		return status;
++
++	ctrl->irq.num = irq;
++	disable_irq(ctrl->irq.num);
++	return 0;
++}
++
++/**
++ * ssam_irq_free() - Free SAM EC wakeup-GPIO interrupt.
++ * @ctrl: The controller for which the IRQ should be freed.
++ *
++ * Free the wakeup-GPIO IRQ previously set up via ssam_irq_setup().
++ */
++void ssam_irq_free(struct ssam_controller *ctrl)
++{
++	free_irq(ctrl->irq.num, ctrl);
++	ctrl->irq.num = -1;
++}
++
++/**
++ * ssam_irq_arm_for_wakeup() - Arm the EC IRQ for wakeup, if enabled.
++ * @ctrl: The controller for which the IRQ should be armed.
++ *
++ * Sets up the IRQ so that it can be used to wake the device. Specifically,
++ * this function enables the IRQ and then, if the device is allowed to wake up
++ * the system, calls enable_irq_wake(). See ssam_irq_disarm_wakeup() for the
++ * corresponding function to disable the IRQ.
++ *
++ * This function is intended to arm the IRQ before entering S2idle suspend.
++ *
++ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
++ * be balanced.
++ */
++int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl)
++{
++	struct device *dev = ssam_controller_device(ctrl);
++	int status;
++
++	enable_irq(ctrl->irq.num);
++	if (device_may_wakeup(dev)) {
++		status = enable_irq_wake(ctrl->irq.num);
++		if (status) {
++			ssam_err(ctrl, "failed to enable wake IRQ: %d\n", status);
++			disable_irq(ctrl->irq.num);
++			return status;
++		}
++
++		ctrl->irq.wakeup_enabled = true;
++	} else {
++		ctrl->irq.wakeup_enabled = false;
++	}
++
++	return 0;
++}
++
++/**
++ * ssam_irq_disarm_wakeup() - Disarm the wakeup IRQ.
++ * @ctrl: The controller for which the IRQ should be disarmed.
++ *
++ * Disarm the IRQ previously set up for wake via ssam_irq_arm_for_wakeup().
++ *
++ * This function is intended to disarm the IRQ after exiting S2idle suspend.
++ *
++ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
++ * be balanced.
++ */
++void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl)
++{
++	int status;
++
++	if (ctrl->irq.wakeup_enabled) {
++		status = disable_irq_wake(ctrl->irq.num);
++		if (status)
++			ssam_err(ctrl, "failed to disable wake IRQ: %d\n", status);
++
++		ctrl->irq.wakeup_enabled = false;
++	}
++	disable_irq(ctrl->irq.num);
++}
+diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
+new file mode 100644
+index 000000000000..5ee9e966f1d7
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/controller.h
+@@ -0,0 +1,276 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Main SSAM/SSH controller structure and functionality.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
++#define _SURFACE_AGGREGATOR_CONTROLLER_H
++
++#include <linux/kref.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/rbtree.h>
++#include <linux/rwsem.h>
++#include <linux/serdev.h>
++#include <linux/spinlock.h>
++#include <linux/srcu.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/serial_hub.h>
++
++#include "ssh_request_layer.h"
++
++
++/* -- Safe counters. -------------------------------------------------------- */
++
++/**
++ * struct ssh_seq_counter - Safe counter for SSH sequence IDs.
++ * @value: The current counter value.
++ */
++struct ssh_seq_counter {
++	u8 value;
++};
++
++/**
++ * struct ssh_rqid_counter - Safe counter for SSH request IDs.
++ * @value: The current counter value.
++ */
++struct ssh_rqid_counter {
++	u16 value;
++};
++
++
++/* -- Event/notification system. -------------------------------------------- */
++
++/**
++ * struct ssam_nf_head - Notifier head for SSAM events.
++ * @srcu: The SRCU struct for synchronization.
++ * @head: List-head for notifier blocks registered under this head.
++ */
++struct ssam_nf_head {
++	struct srcu_struct srcu;
++	struct list_head head;
++};
++
++/**
++ * struct ssam_nf - Notifier callback- and activation-registry for SSAM events.
++ * @lock:     Lock guarding (de-)registration of notifier blocks. Note: This
++ *            lock does not need to be held for notifier calls, only
++ *            registration and deregistration.
++ * @refcount: The root of the RB-tree used for reference-counting enabled
++ *            events/notifications.
++ * @head:     The list of notifier heads for event/notification callbacks.
++ */
++struct ssam_nf {
++	struct mutex lock;
++	struct rb_root refcount;
++	struct ssam_nf_head head[SSH_NUM_EVENTS];
++};
++
++
++/* -- Event/async request completion system. -------------------------------- */
++
++struct ssam_cplt;
++
++/**
++ * struct ssam_event_item - Struct for event queuing and completion.
++ * @node:     The node in the queue.
++ * @rqid:     The request ID of the event.
++ * @event:    Actual event data.
++ */
++struct ssam_event_item {
++	struct list_head node;
++	u16 rqid;
++
++	struct ssam_event event;	/* must be last */
++};
++
++/**
++ * struct ssam_event_queue - Queue for completing received events.
++ * @cplt: Reference to the completion system on which this queue is active.
++ * @lock: The lock for any operation on the queue.
++ * @head: The list-head of the queue.
++ * @work: The &struct work_struct performing completion work for this queue.
++ */
++struct ssam_event_queue {
++	struct ssam_cplt *cplt;
++
++	spinlock_t lock;
++	struct list_head head;
++	struct work_struct work;
++};
++
++/**
++ * struct ssam_event_target - Set of queues for a single SSH target ID.
++ * @queue: The array of queues, one queue per event ID.
++ */
++struct ssam_event_target {
++	struct ssam_event_queue queue[SSH_NUM_EVENTS];
++};
++
++/**
++ * struct ssam_cplt - SSAM event/async request completion system.
++ * @dev:          The device with which this system is associated. Only used
++ *                for logging.
++ * @wq:           The &struct workqueue_struct on which all completion work
++ *                items are queued.
++ * @event:        Event completion management.
++ * @event.target: Array of &struct ssam_event_target, one for each target.
++ * @event.notif:  Notifier callbacks and event activation reference counting.
++ */
++struct ssam_cplt {
++	struct device *dev;
++	struct workqueue_struct *wq;
++
++	struct {
++		struct ssam_event_target target[SSH_NUM_TARGETS];
++		struct ssam_nf notif;
++	} event;
++};
++
++
++/* -- Main SSAM device structures. ------------------------------------------ */
++
++/**
++ * enum ssam_controller_state - State values for &struct ssam_controller.
++ * @SSAM_CONTROLLER_UNINITIALIZED:
++ *	The controller has not been initialized yet or has been deinitialized.
++ * @SSAM_CONTROLLER_INITIALIZED:
++ *	The controller is initialized, but has not been started yet.
++ * @SSAM_CONTROLLER_STARTED:
++ *	The controller has been started and is ready to use.
++ * @SSAM_CONTROLLER_STOPPED:
++ *	The controller has been stopped.
++ * @SSAM_CONTROLLER_SUSPENDED:
++ *	The controller has been suspended.
++ */
++enum ssam_controller_state {
++	SSAM_CONTROLLER_UNINITIALIZED,
++	SSAM_CONTROLLER_INITIALIZED,
++	SSAM_CONTROLLER_STARTED,
++	SSAM_CONTROLLER_STOPPED,
++	SSAM_CONTROLLER_SUSPENDED,
++};
++
++/**
++ * struct ssam_controller_caps - Controller device capabilities.
++ * @ssh_power_profile:             SSH power profile.
++ * @ssh_buffer_size:               SSH driver UART buffer size.
++ * @screen_on_sleep_idle_timeout:  SAM UART screen-on sleep idle timeout.
++ * @screen_off_sleep_idle_timeout: SAM UART screen-off sleep idle timeout.
++ * @d3_closes_handle:              SAM closes UART handle in D3.
++ *
++ * Controller and SSH device capabilities found in ACPI.
++ */
++struct ssam_controller_caps {
++	u32 ssh_power_profile;
++	u32 ssh_buffer_size;
++	u32 screen_on_sleep_idle_timeout;
++	u32 screen_off_sleep_idle_timeout;
++	u32 d3_closes_handle:1;
++};
++
++/**
++ * struct ssam_controller - SSAM controller device.
++ * @kref:  Reference count of the controller.
++ * @lock:  Main lock for the controller, used to guard state changes.
++ * @state: Controller state.
++ * @rtl:   Request transport layer for SSH I/O.
++ * @cplt:  Completion system for SSH/SSAM events and asynchronous requests.
++ * @counter:      Safe SSH message ID counters.
++ * @counter.seq:  Sequence ID counter.
++ * @counter.rqid: Request ID counter.
++ * @irq:          Wakeup IRQ resources.
++ * @irq.num:      The wakeup IRQ number.
++ * @irq.wakeup_enabled: Whether wakeup by IRQ is enabled during suspend.
++ * @caps: The controller device capabilities.
++ */
++struct ssam_controller {
++	struct kref kref;
++
++	struct rw_semaphore lock;
++	enum ssam_controller_state state;
++
++	struct ssh_rtl rtl;
++	struct ssam_cplt cplt;
++
++	struct {
++		struct ssh_seq_counter seq;
++		struct ssh_rqid_counter rqid;
++	} counter;
++
++	struct {
++		int num;
++		bool wakeup_enabled;
++	} irq;
++
++	struct ssam_controller_caps caps;
++};
++
++#define to_ssam_controller(ptr, member) \
++	container_of(ptr, struct ssam_controller, member)
++
++#define ssam_dbg(ctrl, fmt, ...)  rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
++#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
++#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
++#define ssam_err(ctrl, fmt, ...)  rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
++
++/**
++ * ssam_controller_receive_buf() - Provide input-data to the controller.
++ * @ctrl: The controller.
++ * @buf:  The input buffer.
++ * @n:    The number of bytes in the input buffer.
++ *
++ * Provide input data to be evaluated by the controller, which has been
++ * received via the lower-level transport.
++ *
++ * Return: Returns the number of bytes consumed, or, if the packet transport
++ * layer of the controller has been shut down, %-ESHUTDOWN.
++ */
++static inline
++int ssam_controller_receive_buf(struct ssam_controller *ctrl,
++				const unsigned char *buf, size_t n)
++{
++	return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
++}
++
++/**
++ * ssam_controller_write_wakeup() - Notify the controller that the underlying
++ * device has space available for data to be written.
++ * @ctrl: The controller.
++ */
++static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl)
++{
++	ssh_ptl_tx_wakeup_transfer(&ctrl->rtl.ptl);
++}
++
++int ssam_controller_init(struct ssam_controller *ctrl, struct serdev_device *s);
++int ssam_controller_start(struct ssam_controller *ctrl);
++void ssam_controller_shutdown(struct ssam_controller *ctrl);
++void ssam_controller_destroy(struct ssam_controller *ctrl);
++
++int ssam_notifier_disable_registered(struct ssam_controller *ctrl);
++void ssam_notifier_restore_registered(struct ssam_controller *ctrl);
++
++int ssam_irq_setup(struct ssam_controller *ctrl);
++void ssam_irq_free(struct ssam_controller *ctrl);
++int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl);
++void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl);
++
++void ssam_controller_lock(struct ssam_controller *c);
++void ssam_controller_unlock(struct ssam_controller *c);
++
++int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version);
++int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl);
++int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl);
++int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl);
++int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl);
++
++int ssam_controller_suspend(struct ssam_controller *ctrl);
++int ssam_controller_resume(struct ssam_controller *ctrl);
++
++#endif /* _SURFACE_AGGREGATOR_CONTROLLER_H */
+diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
+new file mode 100644
+index 000000000000..18e0e9e34e7b
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/core.c
+@@ -0,0 +1,787 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface Serial Hub (SSH) driver for communication with the Surface/System
++ * Aggregator Module (SSAM/SAM).
++ *
++ * Provides access to a SAM-over-SSH connected EC via a controller device.
++ * Handles communication via requests as well as enabling, disabling, and
++ * relaying of events.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/atomic.h>
++#include <linux/completion.h>
++#include <linux/gpio/consumer.h>
++#include <linux/kernel.h>
++#include <linux/kref.h>
++#include <linux/module.h>
++#include <linux/pm.h>
++#include <linux/serdev.h>
++#include <linux/sysfs.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include "controller.h"
++
++
++/* -- Static controller reference. ------------------------------------------ */
++
++/*
++ * Main controller reference. The corresponding lock must be held while
++ * accessing (reading/writing) the reference.
++ */
++static struct ssam_controller *__ssam_controller;
++static DEFINE_SPINLOCK(__ssam_controller_lock);
++
++/**
++ * ssam_get_controller() - Get reference to SSAM controller.
++ *
++ * Returns a reference to the SSAM controller of the system or %NULL if there
++ * is none, it hasn't been set up yet, or it has already been unregistered.
++ * This function automatically increments the reference count of the
++ * controller, thus the calling party must ensure that ssam_controller_put()
++ * is called when it doesn't need the controller any more.
++ */
++struct ssam_controller *ssam_get_controller(void)
++{
++	struct ssam_controller *ctrl;
++
++	spin_lock(&__ssam_controller_lock);
++
++	ctrl = __ssam_controller;
++	if (!ctrl)
++		goto out;
++
++	if (WARN_ON(!kref_get_unless_zero(&ctrl->kref)))
++		ctrl = NULL;
++
++out:
++	spin_unlock(&__ssam_controller_lock);
++	return ctrl;
++}
++EXPORT_SYMBOL_GPL(ssam_get_controller);
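A consumer that only needs the controller briefly pairs this with ssam_controller_put(), which is declared in the public controller header and also used further down in this file. A minimal sketch:

```c
/*
 * Illustrative sketch (not part of this patch): borrow the main controller
 * reference for a short-lived operation and drop it again afterwards.
 */
static int example_use_controller(void)
{
	struct ssam_controller *ctrl;

	ctrl = ssam_get_controller();
	if (!ctrl)
		return -ENODEV;		/* No controller, or it is already gone. */

	/* ... submit requests or register notifiers here ... */

	ssam_controller_put(ctrl);	/* Balance the reference taken above. */
	return 0;
}
```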
++
++/**
++ * ssam_try_set_controller() - Try to set the main controller reference.
++ * @ctrl: The controller to which the reference should point.
++ *
++ * Set the main controller reference to the given pointer if the reference
++ * hasn't been set already.
++ *
++ * Return: Returns zero on success or %-EEXIST if the reference has already
++ * been set.
++ */
++static int ssam_try_set_controller(struct ssam_controller *ctrl)
++{
++	int status = 0;
++
++	spin_lock(&__ssam_controller_lock);
++	if (!__ssam_controller)
++		__ssam_controller = ctrl;
++	else
++		status = -EEXIST;
++	spin_unlock(&__ssam_controller_lock);
++
++	return status;
++}
++
++/**
++ * ssam_clear_controller() - Remove/clear the main controller reference.
++ *
++ * Clears the main controller reference, i.e. sets it to %NULL. This function
++ * should be called before the controller is shut down.
++ */
++static void ssam_clear_controller(void)
++{
++	spin_lock(&__ssam_controller_lock);
++	__ssam_controller = NULL;
++	spin_unlock(&__ssam_controller_lock);
++}
++
++/**
++ * ssam_client_link() - Link an arbitrary client device to the controller.
++ * @c: The controller to link to.
++ * @client: The client device.
++ *
++ * Link an arbitrary client device to the controller by creating a device link
++ * between it as consumer and the controller device as provider. This function
++ * can be used for non-SSAM devices (or SSAM devices not registered as a child
++ * under the controller) to guarantee that the controller is valid for as long
++ * as the driver of the client device is bound, and that proper suspend and
++ * resume ordering is guaranteed.
++ *
++ * The device link does not have to be destructed manually. It is removed
++ * automatically once the driver of the client device unbinds.
++ *
++ * Return: Returns zero on success, %-ENODEV if the controller is not ready or
++ * going to be removed soon, or %-ENOMEM if the device link could not be
++ * created for other reasons.
++ */
++int ssam_client_link(struct ssam_controller *c, struct device *client)
++{
++	const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
++	struct device_link *link;
++	struct device *ctrldev;
++
++	ssam_controller_statelock(c);
++
++	if (c->state != SSAM_CONTROLLER_STARTED) {
++		ssam_controller_stateunlock(c);
++		return -ENODEV;
++	}
++
++	ctrldev = ssam_controller_device(c);
++	if (!ctrldev) {
++		ssam_controller_stateunlock(c);
++		return -ENODEV;
++	}
++
++	link = device_link_add(client, ctrldev, flags);
++	if (!link) {
++		ssam_controller_stateunlock(c);
++		return -ENOMEM;
++	}
++
++	/*
++	 * Return -ENODEV if the supplier driver is on its way to be removed. In
++	 * this case, the controller won't be around for much longer and the
++	 * device link is not going to save us any more, as unbinding is
++	 * already in progress.
++	 */
++	if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
++		ssam_controller_stateunlock(c);
++		return -ENODEV;
++	}
++
++	ssam_controller_stateunlock(c);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ssam_client_link);
++
++/**
++ * ssam_client_bind() - Bind an arbitrary client device to the controller.
++ * @client: The client device.
++ *
++ * Link an arbitrary client device to the controller by creating a device link
++ * between it as consumer and the main controller device as provider. This
++ * function can be used for non-SSAM devices to guarantee that the controller
++ * returned by this function is valid for as long as the driver of the client
++ * device is bound, and that proper suspend and resume ordering is guaranteed.
++ *
++ * This function does essentially the same as ssam_client_link(), except that
++ * it first fetches the main controller reference, then creates the link, and
++ * finally returns this reference. Note that this function does not increment
++ * the reference counter of the controller, as, due to the link, the
++ * controller lifetime is assured as long as the driver of the client device
++ * is bound.
++ *
++ * It is not valid to use the controller reference obtained by this method
++ * outside of the driver bound to the client device at the time of calling
++ * this function, without first incrementing the reference count of the
++ * controller via ssam_controller_get(). Even after doing this, care must be
++ * taken that requests are only submitted and notifiers are only
++ * (un-)registered when the controller is active and not suspended. In other
++ * words: The device link only lives as long as the client driver is bound and
++ * any guarantees enforced by this link (e.g. active controller state) can
++ * only be relied upon as long as this link exists and may need to be enforced
++ * in other ways afterwards.
++ *
++ * The created device link does not have to be destructed manually. It is
++ * removed automatically once the driver of the client device unbinds.
++ *
++ * Return: Returns the controller on success, an error pointer with %-ENODEV
++ * if the controller is not present, not ready or going to be removed soon, or
++ * %-ENOMEM if the device link could not be created for other reasons.
++ */
++struct ssam_controller *ssam_client_bind(struct device *client)
++{
++	struct ssam_controller *c;
++	int status;
++
++	c = ssam_get_controller();
++	if (!c)
++		return ERR_PTR(-ENODEV);
++
++	status = ssam_client_link(c, client);
++
++	/*
++	 * Note that we can drop our controller reference in both success and
++	 * failure cases: On success, we have bound the controller lifetime
++	 * inherently to the client driver lifetime, i.e. the controller is
++	 * now guaranteed to outlive the client driver. On failure, we're not
++	 * going to use the controller any more.
++	 */
++	ssam_controller_put(c);
++
++	return status >= 0 ? c : ERR_PTR(status);
++}
++EXPORT_SYMBOL_GPL(ssam_client_bind);
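A typical client calls ssam_client_bind() from its probe callback and defers probing while the controller is not yet up. The platform-driver framing below is a hypothetical sketch; only ssam_client_bind() itself is defined in this file.

```c
/*
 * Hypothetical client probe sketch (not part of this patch): bind to the
 * controller and defer probing until it is available. Assumes
 * <linux/platform_device.h> is included by the client driver.
 */
static int example_client_probe(struct platform_device *pdev)
{
	struct ssam_controller *ctrl;

	ctrl = ssam_client_bind(&pdev->dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

	platform_set_drvdata(pdev, ctrl);

	/* The device link keeps ctrl valid while this driver stays bound. */
	return 0;
}
```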
++
++
++/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
++
++static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
++			    size_t n)
++{
++	struct ssam_controller *ctrl;
++
++	ctrl = serdev_device_get_drvdata(dev);
++	return ssam_controller_receive_buf(ctrl, buf, n);
++}
++
++static void ssam_write_wakeup(struct serdev_device *dev)
++{
++	ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
++}
++
++static const struct serdev_device_ops ssam_serdev_ops = {
++	.receive_buf = ssam_receive_buf,
++	.write_wakeup = ssam_write_wakeup,
++};
++
++
++/* -- SysFS and misc. ------------------------------------------------------- */
++
++static int ssam_log_firmware_version(struct ssam_controller *ctrl)
++{
++	u32 version, a, b, c;
++	int status;
++
++	status = ssam_get_firmware_version(ctrl, &version);
++	if (status)
++		return status;
++
++	a = (version >> 24) & 0xff;
++	b = ((version >> 8) & 0xffff);
++	c = version & 0xff;
++
++	ssam_info(ctrl, "SAM firmware version: %u.%u.%u\n", a, b, c);
++	return 0;
++}
++
++static ssize_t firmware_version_show(struct device *dev,
++				     struct device_attribute *attr, char *buf)
++{
++	struct ssam_controller *ctrl = dev_get_drvdata(dev);
++	u32 version, a, b, c;
++	int status;
++
++	status = ssam_get_firmware_version(ctrl, &version);
++	if (status < 0)
++		return status;
++
++	a = (version >> 24) & 0xff;
++	b = ((version >> 8) & 0xffff);
++	c = version & 0xff;
++
++	return sysfs_emit(buf, "%u.%u.%u\n", a, b, c);
++}
++static DEVICE_ATTR_RO(firmware_version);
++
++static struct attribute *ssam_sam_attrs[] = {
++	&dev_attr_firmware_version.attr,
++	NULL
++};
++
++static const struct attribute_group ssam_sam_group = {
++	.name = "sam",
++	.attrs = ssam_sam_attrs,
++};
++
++
++/* -- ACPI based device setup. ---------------------------------------------- */
++
++static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
++						  void *ctx)
++{
++	struct serdev_device *serdev = ctx;
++	struct acpi_resource_common_serialbus *serial;
++	struct acpi_resource_uart_serialbus *uart;
++	bool flow_control;
++	int status = 0;
++
++	if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
++		return AE_OK;
++
++	serial = &rsc->data.common_serial_bus;
++	if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
++		return AE_OK;
++
++	uart = &rsc->data.uart_serial_bus;
++
++	/* Set up serdev device. */
++	serdev_device_set_baudrate(serdev, uart->default_baud_rate);
++
++	/* serdev currently only supports RTSCTS flow control. */
++	if (uart->flow_control & (~((u8)ACPI_UART_FLOW_CONTROL_HW))) {
++		dev_warn(&serdev->dev, "setup: unsupported flow control (value: %#04x)\n",
++			 uart->flow_control);
++	}
++
++	/* Set RTSCTS flow control. */
++	flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
++	serdev_device_set_flow_control(serdev, flow_control);
++
++	/* serdev currently only supports EVEN/ODD parity. */
++	switch (uart->parity) {
++	case ACPI_UART_PARITY_NONE:
++		status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
++		break;
++	case ACPI_UART_PARITY_EVEN:
++		status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
++		break;
++	case ACPI_UART_PARITY_ODD:
++		status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
++		break;
++	default:
++		dev_warn(&serdev->dev, "setup: unsupported parity (value: %#04x)\n",
++			 uart->parity);
++		break;
++	}
++
++	if (status) {
++		dev_err(&serdev->dev, "setup: failed to set parity (value: %#04x, error: %d)\n",
++			uart->parity, status);
++		return AE_ERROR;
++	}
++
++	/* We've found the resource and are done. */
++	return AE_CTRL_TERMINATE;
++}
++
++static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
++					      struct serdev_device *serdev)
++{
++	return acpi_walk_resources(handle, METHOD_NAME__CRS,
++				   ssam_serdev_setup_via_acpi_crs, serdev);
++}
++
++
++/* -- Power management. ----------------------------------------------------- */
++
++static void ssam_serial_hub_shutdown(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Try to disable notifiers, signal display-off and D0-exit, ignore any
++	 * errors.
++	 *
++	 * Note: It has not been established yet if this is actually
++	 * necessary/useful for shutdown.
++	 */
++
++	status = ssam_notifier_disable_registered(c);
++	if (status) {
++		ssam_err(c, "pm: failed to disable notifiers for shutdown: %d\n",
++			 status);
++	}
++
++	status = ssam_ctrl_notif_display_off(c);
++	if (status)
++		ssam_err(c, "pm: display-off notification failed: %d\n", status);
++
++	status = ssam_ctrl_notif_d0_exit(c);
++	if (status)
++		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++}
++
++#ifdef CONFIG_PM_SLEEP
++
++static int ssam_serial_hub_pm_prepare(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Try to signal display-off. This will quiesce events.
++	 *
++	 * Note: Signaling display-off/display-on should normally be done from
++	 * some sort of display state notifier. As that is not available,
++	 * signal it here.
++	 */
++
++	status = ssam_ctrl_notif_display_off(c);
++	if (status)
++		ssam_err(c, "pm: display-off notification failed: %d\n", status);
++
++	return status;
++}
++
++static void ssam_serial_hub_pm_complete(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Try to signal display-on. This will restore events.
++	 *
++	 * Note: Signaling display-off/display-on should normally be done from
++	 * some sort of display state notifier. As that is not available,
++	 * signal it here.
++	 */
++
++	status = ssam_ctrl_notif_display_on(c);
++	if (status)
++		ssam_err(c, "pm: display-on notification failed: %d\n", status);
++}
++
++static int ssam_serial_hub_pm_suspend(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Try to signal D0-exit, enable IRQ wakeup if specified. Abort on
++	 * error.
++	 */
++
++	status = ssam_ctrl_notif_d0_exit(c);
++	if (status) {
++		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++		goto err_notif;
++	}
++
++	status = ssam_irq_arm_for_wakeup(c);
++	if (status)
++		goto err_irq;
++
++	WARN_ON(ssam_controller_suspend(c));
++	return 0;
++
++err_irq:
++	ssam_ctrl_notif_d0_entry(c);
++err_notif:
++	ssam_ctrl_notif_display_on(c);
++	return status;
++}
++
++static int ssam_serial_hub_pm_resume(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	WARN_ON(ssam_controller_resume(c));
++
++	/*
++	 * Try to disable IRQ wakeup (if specified) and signal D0-entry. In
++	 * case of errors, log them and try to restore normal operation state
++	 * as far as possible.
++	 *
++	 * Note: Signaling display-off/display-on should normally be done from
++	 * some sort of display state notifier. As that is not available,
++	 * signal it here.
++	 */
++
++	ssam_irq_disarm_wakeup(c);
++
++	status = ssam_ctrl_notif_d0_entry(c);
++	if (status)
++		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
++
++	return 0;
++}
++
++static int ssam_serial_hub_pm_freeze(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * During hibernation image creation, we only have to ensure that the
++	 * EC doesn't send us any events. This is done via the display-off
++	 * and D0-exit notifications. Note that this sets up the wakeup IRQ
++	 * on the EC side; however, we have disabled it by default on our side
++	 * and won't enable it here.
++	 *
++	 * See ssam_serial_hub_pm_poweroff() for more details on the hibernation
++	 * process.
++	 */
++
++	status = ssam_ctrl_notif_d0_exit(c);
++	if (status) {
++		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++		ssam_ctrl_notif_display_on(c);
++		return status;
++	}
++
++	WARN_ON(ssam_controller_suspend(c));
++	return 0;
++}
++
++static int ssam_serial_hub_pm_thaw(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	WARN_ON(ssam_controller_resume(c));
++
++	status = ssam_ctrl_notif_d0_entry(c);
++	if (status)
++		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
++
++	return status;
++}
++
++static int ssam_serial_hub_pm_poweroff(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * When entering hibernation and powering off the system, the EC, at
++	 * least on some models, may disable events. Without us taking care of
++	 * that, this leads to events not being enabled/restored when the
++	 * system resumes from hibernation, resulting in SAM-HID subsystem devices
++	 * (i.e. keyboard, touchpad) not working, AC-plug/AC-unplug events being
++	 * gone, etc.
++	 *
++	 * To avoid these issues, we disable all registered events here (this is
++	 * likely not actually required) and restore them during the driver's PM
++	 * restore callback.
++	 *
++	 * Wakeup from the EC interrupt is not supported during hibernation,
++	 * so don't arm the IRQ here.
++	 */
++
++	status = ssam_notifier_disable_registered(c);
++	if (status) {
++		ssam_err(c, "pm: failed to disable notifiers for hibernation: %d\n",
++			 status);
++		return status;
++	}
++
++	status = ssam_ctrl_notif_d0_exit(c);
++	if (status) {
++		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++		ssam_notifier_restore_registered(c);
++		return status;
++	}
++
++	WARN_ON(ssam_controller_suspend(c));
++	return 0;
++}
++
++static int ssam_serial_hub_pm_restore(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Ignore but log errors; try to restore state as much as possible in
++	 * case of failure. See ssam_serial_hub_pm_poweroff() for more details on
++	 * the hibernation process.
++	 */
++
++	WARN_ON(ssam_controller_resume(c));
++
++	status = ssam_ctrl_notif_d0_entry(c);
++	if (status)
++		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
++
++	ssam_notifier_restore_registered(c);
++	return 0;
++}
++
++static const struct dev_pm_ops ssam_serial_hub_pm_ops = {
++	.prepare  = ssam_serial_hub_pm_prepare,
++	.complete = ssam_serial_hub_pm_complete,
++	.suspend  = ssam_serial_hub_pm_suspend,
++	.resume   = ssam_serial_hub_pm_resume,
++	.freeze   = ssam_serial_hub_pm_freeze,
++	.thaw     = ssam_serial_hub_pm_thaw,
++	.poweroff = ssam_serial_hub_pm_poweroff,
++	.restore  = ssam_serial_hub_pm_restore,
++};
++
++#else /* CONFIG_PM_SLEEP */
++
++static const struct dev_pm_ops ssam_serial_hub_pm_ops = { };
++
++#endif /* CONFIG_PM_SLEEP */
++
++
++/* -- Device/driver setup. -------------------------------------------------- */
++
++static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
++static const struct acpi_gpio_params gpio_ssam_wakeup     = { 1, 0, false };
++
++static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
++	{ "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
++	{ "ssam_wakeup-gpio",     &gpio_ssam_wakeup,     1 },
++	{ },
++};
++
++static int ssam_serial_hub_probe(struct serdev_device *serdev)
++{
++	struct ssam_controller *ctrl;
++	acpi_handle *ssh = ACPI_HANDLE(&serdev->dev);
++	acpi_status astatus;
++	int status;
++
++	if (gpiod_count(&serdev->dev, NULL) < 0)
++		return -ENODEV;
++
++	status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
++	if (status)
++		return status;
++
++	/* Allocate controller. */
++	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
++	if (!ctrl)
++		return -ENOMEM;
++
++	/* Initialize controller. */
++	status = ssam_controller_init(ctrl, serdev);
++	if (status)
++		goto err_ctrl_init;
++
++	ssam_controller_lock(ctrl);
++
++	/* Set up serdev device. */
++	serdev_device_set_drvdata(serdev, ctrl);
++	serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
++	status = serdev_device_open(serdev);
++	if (status)
++		goto err_devopen;
++
++	astatus = ssam_serdev_setup_via_acpi(ssh, serdev);
++	if (ACPI_FAILURE(astatus)) {
++		status = -ENXIO;
++		goto err_devinit;
++	}
++
++	/* Start controller. */
++	status = ssam_controller_start(ctrl);
++	if (status)
++		goto err_devinit;
++
++	ssam_controller_unlock(ctrl);
++
++	/*
++	 * Initial SAM requests: Log version and notify default/init power
++	 * states.
++	 */
++	status = ssam_log_firmware_version(ctrl);
++	if (status)
++		goto err_initrq;
++
++	status = ssam_ctrl_notif_d0_entry(ctrl);
++	if (status)
++		goto err_initrq;
++
++	status = ssam_ctrl_notif_display_on(ctrl);
++	if (status)
++		goto err_initrq;
++
++	status = sysfs_create_group(&serdev->dev.kobj, &ssam_sam_group);
++	if (status)
++		goto err_initrq;
++
++	/* Set up IRQ. */
++	status = ssam_irq_setup(ctrl);
++	if (status)
++		goto err_irq;
++
++	/* Finally, set main controller reference. */
++	status = ssam_try_set_controller(ctrl);
++	if (WARN_ON(status))	/* Currently, we're the only provider. */
++		goto err_mainref;
++
++	/*
++	 * TODO: The EC can wake up the system via the associated GPIO interrupt
++	 *       in multiple situations. One of which is the remaining battery
++	 *       capacity falling below a certain threshold. Normally, we should
++	 *       use the device_init_wakeup function, however, the EC also seems
++	 *       to have other reasons for waking up the system and it seems
++	 *       that Windows has additional checks whether the system should be
++	 *       resumed. In short, this causes some spurious unwanted wake-ups.
++	 *       For now let's thus default power/wakeup to false.
++	 */
++	device_set_wakeup_capable(&serdev->dev, true);
++	acpi_walk_dep_device_list(ssh);
++
++	return 0;
++
++err_mainref:
++	ssam_irq_free(ctrl);
++err_irq:
++	sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
++err_initrq:
++	ssam_controller_lock(ctrl);
++	ssam_controller_shutdown(ctrl);
++err_devinit:
++	serdev_device_close(serdev);
++err_devopen:
++	ssam_controller_destroy(ctrl);
++	ssam_controller_unlock(ctrl);
++err_ctrl_init:
++	kfree(ctrl);
++	return status;
++}
++
++static void ssam_serial_hub_remove(struct serdev_device *serdev)
++{
++	struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
++	int status;
++
++	/* Clear static reference so that no one else can get a new one. */
++	ssam_clear_controller();
++
++	/* Disable and free IRQ. */
++	ssam_irq_free(ctrl);
++
++	sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
++	ssam_controller_lock(ctrl);
++
++	/* Act as if suspending to silence events. */
++	status = ssam_ctrl_notif_display_off(ctrl);
++	if (status) {
++		dev_err(&serdev->dev, "display-off notification failed: %d\n",
++			status);
++	}
++
++	status = ssam_ctrl_notif_d0_exit(ctrl);
++	if (status) {
++		dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
++			status);
++	}
++
++	/* Shut down controller and remove serdev device reference from it. */
++	ssam_controller_shutdown(ctrl);
++
++	/* Shut down actual transport. */
++	serdev_device_wait_until_sent(serdev, 0);
++	serdev_device_close(serdev);
++
++	/* Drop our controller reference. */
++	ssam_controller_unlock(ctrl);
++	ssam_controller_put(ctrl);
++
++	device_set_wakeup_capable(&serdev->dev, false);
++}
++
++static const struct acpi_device_id ssam_serial_hub_match[] = {
++	{ "MSHW0084", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match);
++
++static struct serdev_device_driver ssam_serial_hub = {
++	.probe = ssam_serial_hub_probe,
++	.remove = ssam_serial_hub_remove,
++	.driver = {
++		.name = "surface_serial_hub",
++		.acpi_match_table = ssam_serial_hub_match,
++		.pm = &ssam_serial_hub_pm_ops,
++		.shutdown = ssam_serial_hub_shutdown,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_serdev_device_driver(ssam_serial_hub);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Subsystem and Surface Serial Hub driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h
+new file mode 100644
+index 000000000000..1221f642dda1
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/ssh_msgb.h
+@@ -0,0 +1,205 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * SSH message builder functions.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
++#define _SURFACE_AGGREGATOR_SSH_MSGB_H
++
++#include <asm/unaligned.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/serial_hub.h>
++
++/**
++ * struct msgbuf - Buffer struct to construct SSH messages.
++ * @begin: Pointer to the beginning of the allocated buffer space.
++ * @end:   Pointer to the end (one past last element) of the allocated buffer
++ *         space.
++ * @ptr:   Pointer to the first free element in the buffer.
++ */
++struct msgbuf {
++	u8 *begin;
++	u8 *end;
++	u8 *ptr;
++};
++
++/**
++ * msgb_init() - Initialize the given message buffer struct.
++ * @msgb: The buffer struct to initialize
++ * @ptr:  Pointer to the underlying memory by which the buffer will be backed.
++ * @cap:  Size of the underlying memory.
++ *
++ * Initialize the given message buffer struct using the provided memory as
++ * backing.
++ */
++static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap)
++{
++	msgb->begin = ptr;
++	msgb->end = ptr + cap;
++	msgb->ptr = ptr;
++}
++
++/**
++ * msgb_bytes_used() - Return the current number of bytes used in the buffer.
++ * @msgb: The message buffer.
++ */
++static inline size_t msgb_bytes_used(const struct msgbuf *msgb)
++{
++	return msgb->ptr - msgb->begin;
++}
++
++static inline void __msgb_push_u8(struct msgbuf *msgb, u8 value)
++{
++	*msgb->ptr = value;
++	msgb->ptr += sizeof(u8);
++}
++
++static inline void __msgb_push_u16(struct msgbuf *msgb, u16 value)
++{
++	put_unaligned_le16(value, msgb->ptr);
++	msgb->ptr += sizeof(u16);
++}
++
++/**
++ * msgb_push_u16() - Push a u16 value to the buffer.
++ * @msgb:  The message buffer.
++ * @value: The value to push to the buffer.
++ */
++static inline void msgb_push_u16(struct msgbuf *msgb, u16 value)
++{
++	if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end))
++		return;
++
++	__msgb_push_u16(msgb, value);
++}
++
++/**
++ * msgb_push_syn() - Push SSH SYN bytes to the buffer.
++ * @msgb: The message buffer.
++ */
++static inline void msgb_push_syn(struct msgbuf *msgb)
++{
++	msgb_push_u16(msgb, SSH_MSG_SYN);
++}
++
++/**
++ * msgb_push_buf() - Push raw data to the buffer.
++ * @msgb: The message buffer.
++ * @buf:  The data to push to the buffer.
++ * @len:  The length of the data to push to the buffer.
++ */
++static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len)
++{
++	msgb->ptr = memcpy(msgb->ptr, buf, len) + len;
++}
++
++/**
++ * msgb_push_crc() - Compute CRC and push it to the buffer.
++ * @msgb: The message buffer.
++ * @buf:  The data for which the CRC should be computed.
++ * @len:  The length of the data for which the CRC should be computed.
++ */
++static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len)
++{
++	msgb_push_u16(msgb, ssh_crc(buf, len));
++}
++
++/**
++ * msgb_push_frame() - Push a SSH message frame header to the buffer.
++ * @msgb: The message buffer
++ * @ty:   The type of the frame.
++ * @len:  The length of the payload of the frame.
++ * @seq:  The sequence ID of the frame/packet.
++ */
++static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq)
++{
++	u8 *const begin = msgb->ptr;
++
++	if (WARN_ON(msgb->ptr + sizeof(struct ssh_frame) > msgb->end))
++		return;
++
++	__msgb_push_u8(msgb, ty);	/* Frame type. */
++	__msgb_push_u16(msgb, len);	/* Frame payload length. */
++	__msgb_push_u8(msgb, seq);	/* Frame sequence ID. */
++
++	msgb_push_crc(msgb, begin, msgb->ptr - begin);
++}
++
++/**
++ * msgb_push_ack() - Push a SSH ACK frame to the buffer.
++ * @msgb: The message buffer
++ * @seq:  The sequence ID of the frame/packet to be ACKed.
++ */
++static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq)
++{
++	/* SYN. */
++	msgb_push_syn(msgb);
++
++	/* ACK-type frame + CRC. */
++	msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq);
++
++	/* Payload CRC (ACK-type frames do not have a payload). */
++	msgb_push_crc(msgb, msgb->ptr, 0);
++}
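To make the buffer handling concrete, the sketch below (not part of the patch) builds an ACK message for a received sequence ID into a caller-provided buffer, using only the helpers defined in this header; the real packet layer sizes the buffer exactly, whereas the capacity here is simply whatever the caller reserves.

```c
/*
 * Illustrative sketch (not part of this patch): build an ACK message for a
 * received sequence ID. The message consists of SYN, an ACK-type frame
 * header plus CRC, and the CRC of the (empty) payload, exactly as pushed by
 * msgb_push_ack() above.
 */
static inline size_t example_build_ack(u8 *buf, size_t cap, u8 seq)
{
	struct msgbuf msgb;

	msgb_init(&msgb, buf, cap);	/* Back the builder with buf. */
	msgb_push_ack(&msgb, seq);	/* SYN + frame + frame CRC + payload CRC. */

	return msgb_bytes_used(&msgb);	/* Number of bytes to transmit. */
}
```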
++
++/**
++ * msgb_push_nak() - Push a SSH NAK frame to the buffer.
++ * @msgb: The message buffer
++ */
++static inline void msgb_push_nak(struct msgbuf *msgb)
++{
++	/* SYN. */
++	msgb_push_syn(msgb);
++
++	/* NAK-type frame + CRC. */
++	msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00);
++
++	/* Payload CRC (NAK-type frames do not have a payload). */
++	msgb_push_crc(msgb, msgb->ptr, 0);
++}
++
++/**
++ * msgb_push_cmd() - Push a SSH command frame with payload to the buffer.
++ * @msgb: The message buffer.
++ * @seq:  The sequence ID (SEQ) of the frame/packet.
++ * @rqid: The request ID (RQID) of the request contained in the frame.
++ * @rqst: The request to wrap in the frame.
++ */
++static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
++				 const struct ssam_request *rqst)
++{
++	const u8 type = SSH_FRAME_TYPE_DATA_SEQ;
++	u8 *cmd;
++
++	/* SYN. */
++	msgb_push_syn(msgb);
++
++	/* Command frame + CRC. */
++	msgb_push_frame(msgb, type, sizeof(struct ssh_command) + rqst->length, seq);
++
++	/* Frame payload: Command struct + payload. */
++	if (WARN_ON(msgb->ptr + sizeof(struct ssh_command) > msgb->end))
++		return;
++
++	cmd = msgb->ptr;
++
++	__msgb_push_u8(msgb, SSH_PLD_TYPE_CMD);		/* Payload type. */
++	__msgb_push_u8(msgb, rqst->target_category);	/* Target category. */
++	__msgb_push_u8(msgb, rqst->target_id);		/* Target ID (out). */
++	__msgb_push_u8(msgb, 0x00);			/* Target ID (in). */
++	__msgb_push_u8(msgb, rqst->instance_id);	/* Instance ID. */
++	__msgb_push_u16(msgb, rqid);			/* Request ID. */
++	__msgb_push_u8(msgb, rqst->command_id);		/* Command ID. */
++
++	/* Command payload. */
++	msgb_push_buf(msgb, rqst->payload, rqst->length);
++
++	/* CRC for command struct + payload. */
++	msgb_push_crc(msgb, cmd, msgb->ptr - cmd);
++}
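The pushes above also determine how large a command message is on the wire, which is what callers must reserve in the backing buffer. The helper below merely restates that layout as a sketch; the actual driver derives these sizes from its serial-hub header rather than open-coding them like this.

```c
/*
 * Illustrative sketch (not part of this patch): total on-wire size of a
 * command message with a payload of payload_len bytes, restating the pushes
 * performed by msgb_push_cmd() above.
 */
static inline size_t example_cmd_message_length(size_t payload_len)
{
	return sizeof(u16)			/* SYN */
	       + sizeof(struct ssh_frame)	/* frame header */
	       + sizeof(u16)			/* frame CRC */
	       + sizeof(struct ssh_command)	/* command header */
	       + payload_len			/* command payload */
	       + sizeof(u16);			/* payload CRC */
}
```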
++
++#endif /* _SURFACE_AGGREGATOR_SSH_MSGB_H */
+diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+new file mode 100644
+index 000000000000..66e38fdc7963
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+@@ -0,0 +1,1710 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * SSH packet transport layer.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/atomic.h>
++#include <linux/jiffies.h>
++#include <linux/kfifo.h>
++#include <linux/kref.h>
++#include <linux/kthread.h>
++#include <linux/ktime.h>
++#include <linux/limits.h>
++#include <linux/list.h>
++#include <linux/lockdep.h>
++#include <linux/serdev.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++
++#include "ssh_msgb.h"
++#include "ssh_packet_layer.h"
++#include "ssh_parser.h"
++
++/*
++ * To simplify reasoning about the code below, we define a few concepts. The
++ * system below is similar to a state-machine for packets, however, there are
++ * too many states to explicitly write them down. To (somewhat) manage the
++ * states and packets we rely on flags, reference counting, and some simple
++ * concepts. State transitions are triggered by actions.
++ *
++ * >> Actions <<
++ *
++ * - submit
++ * - transmission start (process next item in queue)
++ * - transmission finished (guaranteed to never be parallel to transmission
++ *   start)
++ * - ACK received
++ * - NAK received (this is equivalent to issuing re-submit for all pending
++ *   packets)
++ * - timeout (this is equivalent to re-issuing a submit or canceling)
++ * - cancel (non-pending and pending)
++ *
++ * >> Data Structures, Packet Ownership, General Overview <<
++ *
++ * The code below employs two main data structures: The packet queue,
++ * containing all packets scheduled for transmission, and the set of pending
++ * packets, containing all packets awaiting an ACK.
++ *
++ * Shared ownership of a packet is controlled via reference counting. Inside
++ * the transport system are a total of five packet owners:
++ *
++ * - the packet queue,
++ * - the pending set,
++ * - the transmitter thread,
++ * - the receiver thread (via ACKing), and
++ * - the timeout work item.
++ *
++ * Normal operation is as follows: The initial reference of the packet is
++ * obtained by submitting the packet and queuing it. The transmitter thread
++ * takes packets from the queue. By doing this, it does not increment the
++ * refcount but takes over the reference (removing it from the queue). If the
++ * packet is sequenced (i.e. needs to be ACKed by the client), the transmitter
++ * thread sets up the timeout and adds the packet to the pending set before
++ * starting to transmit it. As the timeout is handled by a reaper task, no
++ * additional reference for it is needed. After the transmit is done, the
++ * reference held by the transmitter thread is dropped. If the packet is
++ * unsequenced (i.e. does not need an ACK), the packet is completed by the
++ * transmitter thread before dropping that reference.
++ *
++ * On receipt of an ACK, the receiver thread removes and obtains the
++ * reference to the packet from the pending set. The receiver thread will then
++ * complete the packet and drop its reference.
++ *
++ * On receipt of a NAK, the receiver thread re-submits all currently pending
++ * packets.
++ *
++ * Packet timeouts are detected by the timeout reaper. This is a task,
++ * scheduled depending on the earliest packet timeout expiration date,
++ * checking all currently pending packets if their timeout has expired. If the
++ * timeout of a packet has expired, it is re-submitted and the number of tries
++ * of this packet is incremented. If this number reaches its limit, the packet
++ * will be completed with a failure.
++ *
++ * On transmission failure (such as repeated packet timeouts), the completion
++ * callback is immediately run on the thread on which the error was detected.
++ *
++ * To ensure that a packet eventually leaves the system it is marked as
++ * "locked" directly before it is going to be completed or when it is
++ * canceled. Marking a packet as "locked" has the effect that passing and
++ * creating new references of the packet is disallowed. This means that the
++ * packet cannot be added to the queue, the pending set, and the timeout, or
++ * be picked up by the transmitter thread or receiver thread. To remove a
++ * packet from the system it has to be marked as locked and subsequently all
++ * references from the data structures (queue, pending) have to be removed.
++ * References held by threads will eventually be dropped automatically as
++ * their execution progresses.
++ *
++ * Note that the packet completion callback is, in case of success and for a
++ * sequenced packet, guaranteed to run on the receiver thread, thus providing
++ * a way to reliably identify responses to the packet. The packet completion
++ * callback is only run once and it does not indicate that the packet has
++ * fully left the system (for this, one should rely on the release method,
++ * triggered when the reference count of the packet reaches zero). In case of
++ * re-submission (and with somewhat unlikely timing), it may be possible that
++ * the packet is being re-transmitted while the completion callback runs.
++ * Completion will occur both on success and internal error, as well as when
++ * the packet is canceled.
++ *
++ * >> Flags <<
++ *
++ * Flags are used to indicate the state and progression of a packet. Some flags
++ * have stricter guarantees than others:
++ *
++ * - locked
++ *   Indicates if the packet is locked. If the packet is locked, passing and/or
++ *   creating additional references to the packet is forbidden. The packet thus
++ *   may not be queued, dequeued, or removed or added to the pending set. Note
++ *   that the packet state flags may still change (e.g. it may be marked as
++ *   ACKed, transmitted, ...).
++ *
++ * - completed
++ *   Indicates if the packet completion callback has been executed or is about
++ *   to be executed. This flag is used to ensure that the packet completion
++ *   callback is only run once.
++ *
++ * - queued
++ *   Indicates if a packet is present in the submission queue or not. This flag
++ *   must only be modified with the queue lock held, and must be coherent to the
++ *   presence of the packet in the queue.
++ *
++ * - pending
++ *   Indicates if a packet is present in the set of pending packets or not.
++ *   This flag must only be modified with the pending lock held, and must be
++ *   coherent to the presence of the packet in the pending set.
++ *
++ * - transmitting
++ *   Indicates if the packet is currently transmitting. In case of
++ *   re-transmissions, it is only safe to wait on the "transmitted" completion
++ *   after this flag has been set. The completion will be set in both the
++ *   success and the error case.
++ *
++ * - transmitted
++ *   Indicates if the packet has been transmitted. This flag is not cleared by
++ *   the system, thus it indicates the first transmission only.
++ *
++ * - acked
++ *   Indicates if the packet has been acknowledged by the client. There are no
++ *   other guarantees given. For example, the packet may still be canceled
++ *   and/or the completion may be triggered with an error even though this
++ *   bit is set. Rely on the status provided to the completion callback
++ *   instead.
++ *
++ * - canceled
++ *   Indicates if the packet has been canceled from the outside. There are no
++ *   other guarantees given. Specifically, the packet may be completed by
++ *   another part of the system before the cancellation attempts to complete it.
++ *
++ * >> General Notes <<
++ *
++ * - To avoid deadlocks, if both queue and pending locks are required, the
++ *   pending lock must be acquired before the queue lock.
++ *
++ * - The packet priority must be accessed only while holding the queue lock.
++ *
++ * - The packet timestamp must be accessed only while holding the pending
++ *   lock.
++ */
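++
++/*
++ * Illustration (sketch only, mirroring __ssh_ptl_resubmit() below): when
++ * both locks are required, the lock-ordering rule stated above results in
++ * the following nesting:
++ *
++ *	spin_lock(&ptl->pending.lock);
++ *	spin_lock(&ptl->queue.lock);
++ *	... move/re-queue the packet, access priority and timestamp ...
++ *	spin_unlock(&ptl->queue.lock);
++ *	spin_unlock(&ptl->pending.lock);
++ */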
++
++/*
++ * SSH_PTL_MAX_PACKET_TRIES - Maximum transmission attempts for packet.
++ *
++ * Maximum number of transmission attempts per sequenced packet in case of
++ * time-outs. Must be smaller than 16. If the packet times out after this
++ * amount of tries, the packet will be completed with %-ETIMEDOUT as status
++ * code.
++ */
++#define SSH_PTL_MAX_PACKET_TRIES		3
++
++/*
++ * SSH_PTL_TX_TIMEOUT - Packet transmission timeout.
++ *
++ * Timeout in jiffies for packet transmission via the underlying serial
++ * device. If transmitting the packet takes longer than this timeout, the
++ * packet will be completed with -ETIMEDOUT. It will not be re-submitted.
++ */
++#define SSH_PTL_TX_TIMEOUT			HZ
++
++/*
++ * SSH_PTL_PACKET_TIMEOUT - Packet response timeout.
++ *
++ * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
++ * time-frame after starting transmission, the packet will be re-submitted.
++ */
++#define SSH_PTL_PACKET_TIMEOUT			ms_to_ktime(1000)
++
++/*
++ * SSH_PTL_PACKET_TIMEOUT_RESOLUTION - Packet timeout granularity.
++ *
++ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
++ * direct re-scheduling of reaper work_struct.
++ */
++#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION	ms_to_ktime(max(2000 / HZ, 50))
++
++/*
++ * SSH_PTL_MAX_PENDING - Maximum number of pending packets.
++ *
++ * Maximum number of sequenced packets concurrently waiting for an ACK.
++ * Packets marked as blocking will not be transmitted while this limit is
++ * reached.
++ */
++#define SSH_PTL_MAX_PENDING			1
++
++/*
++ * SSH_PTL_RX_BUF_LEN - Evaluation-buffer size in bytes.
++ */
++#define SSH_PTL_RX_BUF_LEN			4096
++
++/*
++ * SSH_PTL_RX_FIFO_LEN - Fifo input-buffer size in bytes.
++ */
++#define SSH_PTL_RX_FIFO_LEN			4096
++
++static void __ssh_ptl_packet_release(struct kref *kref)
++{
++	struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
++
++	ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
++	p->ops->release(p);
++}
++
++/**
++ * ssh_packet_get() - Increment reference count of packet.
++ * @packet: The packet to increment the reference count of.
++ *
++ * Increments the reference count of the given packet. See ssh_packet_put()
++ * for the counter-part of this function.
++ *
++ * Return: Returns the packet provided as input.
++ */
++struct ssh_packet *ssh_packet_get(struct ssh_packet *packet)
++{
++	if (packet)
++		kref_get(&packet->refcnt);
++	return packet;
++}
++EXPORT_SYMBOL_GPL(ssh_packet_get);
++
++/**
++ * ssh_packet_put() - Decrement reference count of packet.
++ * @packet: The packet to decrement the reference count of.
++ *
++ * If the reference count reaches zero, the ``release`` callback specified in
++ * the packet's &struct ssh_packet_ops, i.e. ``packet->ops->release``, will be
++ * called.
++ *
++ * See ssh_packet_get() for the counter-part of this function.
++ */
++void ssh_packet_put(struct ssh_packet *packet)
++{
++	if (packet)
++		kref_put(&packet->refcnt, __ssh_ptl_packet_release);
++}
++EXPORT_SYMBOL_GPL(ssh_packet_put);
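++
++/*
++ * Example (illustrative sketch only): code that hands a packet over to
++ * asynchronous machinery keeps it alive by taking an additional reference
++ * and dropping that reference once the handover is no longer needed:
++ *
++ *	struct ssh_packet *ref = ssh_packet_get(packet);
++ *
++ *	... hand "ref" over, e.g. store it in a list or work item ...
++ *
++ *	ssh_packet_put(ref);
++ */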
++
++static u8 ssh_packet_get_seq(struct ssh_packet *packet)
++{
++	return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
++}
++
++/**
++ * ssh_packet_init() - Initialize SSH packet.
++ * @packet:   The packet to initialize.
++ * @type:     Type-flags of the packet.
++ * @priority: Priority of the packet. See SSH_PACKET_PRIORITY() for details.
++ * @ops:      Packet operations.
++ *
++ * Initializes the given SSH packet. Sets the transmission buffer pointer to
++ * %NULL and the transmission buffer length to zero. For data-type packets,
++ * this buffer has to be set separately via ssh_packet_set_data() before
++ * submission, and must contain a valid SSH message, i.e. frame with optional
++ * payload of any type.
++ */
++void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
++		     u8 priority, const struct ssh_packet_ops *ops)
++{
++	kref_init(&packet->refcnt);
++
++	packet->ptl = NULL;
++	INIT_LIST_HEAD(&packet->queue_node);
++	INIT_LIST_HEAD(&packet->pending_node);
++
++	packet->state = type & SSH_PACKET_FLAGS_TY_MASK;
++	packet->priority = priority;
++	packet->timestamp = KTIME_MAX;
++
++	packet->data.ptr = NULL;
++	packet->data.len = 0;
++
++	packet->ops = ops;
++}
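++
++/*
++ * Example (illustrative sketch only, mirroring ssh_ptl_send_ack() below):
++ * assuming the packet and its message buffer have already been allocated
++ * and serialized, a caller initializes the packet, attaches the buffer,
++ * submits it, and drops its own reference:
++ *
++ *	ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
++ *			&ssh_ptl_ctrl_packet_ops);
++ *	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
++ *	ssh_ptl_submit(ptl, packet);
++ *	ssh_packet_put(packet);
++ */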
++
++/**
++ * ssh_ctrl_packet_alloc() - Allocate control packet.
++ * @packet: Where the pointer to the newly allocated packet should be stored.
++ * @buffer: The buffer corresponding to this packet.
++ * @flags:  Flags used for allocation.
++ *
++ * Allocates a packet and corresponding transport buffer. Sets the packet's
++ * buffer reference to the allocated buffer. The packet must be freed via
++ * ssh_ctrl_packet_free(), which will also free the corresponding buffer. The
++ * corresponding buffer must not be freed separately. Intended to be used with
++ * %ssh_ptl_ctrl_packet_ops as packet operations.
++ *
++ * Return: Returns zero on success, %-ENOMEM if the allocation failed.
++ */
++static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
++				 struct ssam_span *buffer, gfp_t flags)
++{
++	*packet = kzalloc(sizeof(**packet) + SSH_MSG_LEN_CTRL, flags);
++	if (!*packet)
++		return -ENOMEM;
++
++	buffer->ptr = (u8 *)(*packet + 1);
++	buffer->len = SSH_MSG_LEN_CTRL;
++
++	return 0;
++}
++
++/**
++ * ssh_ctrl_packet_free() - Free control packet.
++ * @p: The packet to free.
++ */
++static void ssh_ctrl_packet_free(struct ssh_packet *p)
++{
++	kfree(p);
++}
++
++static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
++	.complete = NULL,
++	.release = ssh_ctrl_packet_free,
++};
++
++static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
++				       ktime_t expires)
++{
++	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
++	ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
++
++	spin_lock(&ptl->rtx_timeout.lock);
++
++	/* Re-adjust / schedule reaper only if it is above resolution delta. */
++	if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
++		ptl->rtx_timeout.expires = expires;
++		mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
++	}
++
++	spin_unlock(&ptl->rtx_timeout.lock);
++}
++
++/* Must be called with queue lock held. */
++static void ssh_packet_next_try(struct ssh_packet *p)
++{
++	u8 base = ssh_packet_priority_get_base(p->priority);
++	u8 try = ssh_packet_priority_get_try(p->priority);
++
++	lockdep_assert_held(&p->ptl->queue.lock);
++
++	p->priority = __SSH_PACKET_PRIORITY(base, try + 1);
++}
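++
++/*
++ * Illustration (sketch): the priority value encodes base priority and try
++ * count, with a higher try resulting in a numerically higher priority. A
++ * once-retried data packet therefore compares greater than a freshly
++ * submitted one and is re-queued ahead of it by
++ * __ssh_ptl_queue_find_entrypoint() below:
++ *
++ *	u8 fresh = SSH_PACKET_PRIORITY(DATA, 0);
++ *	u8 retry = SSH_PACKET_PRIORITY(DATA, 1);
++ *
++ *	... here retry > fresh holds ...
++ */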
++
++/* Must be called with queue lock held. */
++static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
++{
++	struct list_head *head;
++	struct ssh_packet *q;
++
++	lockdep_assert_held(&p->ptl->queue.lock);
++
++	/*
++	 * We generally assume that there are fewer control (ACK/NAK) packets
++	 * and re-submitted data packets than there are normal data packets (at
++	 * least in situations in which many packets are queued; if there
++	 * aren't many packets queued, the decision on how to iterate should be
++	 * basically irrelevant; the number of control/data packets is more or
++	 * less limited via the maximum number of pending packets). Thus, when
++	 * inserting a control or re-submitted data packet (determined by its
++	 * priority), we search from front to back. Normal data packets are
++	 * usually queued directly at the tail of the queue, so for those we
++	 * search from back to front.
++	 */
++
++	if (p->priority > SSH_PACKET_PRIORITY(DATA, 0)) {
++		list_for_each(head, &p->ptl->queue.head) {
++			q = list_entry(head, struct ssh_packet, queue_node);
++
++			if (q->priority < p->priority)
++				break;
++		}
++	} else {
++		list_for_each_prev(head, &p->ptl->queue.head) {
++			q = list_entry(head, struct ssh_packet, queue_node);
++
++			if (q->priority >= p->priority) {
++				head = head->next;
++				break;
++			}
++		}
++	}
++
++	return head;
++}
++
++/* Must be called with queue lock held. */
++static int __ssh_ptl_queue_push(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++	struct list_head *head;
++
++	lockdep_assert_held(&ptl->queue.lock);
++
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return -ESHUTDOWN;
++
++	/* Avoid further transitions when canceling/completing. */
++	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
++		return -EINVAL;
++
++	/* If this packet has already been queued, do not add it. */
++	if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state))
++		return -EALREADY;
++
++	head = __ssh_ptl_queue_find_entrypoint(packet);
++
++	list_add_tail(&ssh_packet_get(packet)->queue_node, head);
++	return 0;
++}
++
++static int ssh_ptl_queue_push(struct ssh_packet *packet)
++{
++	int status;
++
++	spin_lock(&packet->ptl->queue.lock);
++	status = __ssh_ptl_queue_push(packet);
++	spin_unlock(&packet->ptl->queue.lock);
++
++	return status;
++}
++
++static void ssh_ptl_queue_remove(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	spin_lock(&ptl->queue.lock);
++
++	if (!test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
++		spin_unlock(&ptl->queue.lock);
++		return;
++	}
++
++	list_del(&packet->queue_node);
++
++	spin_unlock(&ptl->queue.lock);
++	ssh_packet_put(packet);
++}
++
++static void ssh_ptl_pending_push(struct ssh_packet *p)
++{
++	struct ssh_ptl *ptl = p->ptl;
++	const ktime_t timestamp = ktime_get_coarse_boottime();
++	const ktime_t timeout = ptl->rtx_timeout.timeout;
++
++	/*
++	 * Note: We can get the time for the timestamp before acquiring the
++	 * lock as this is the only place we're setting it and this function
++	 * is called only from the transmitter thread. Thus it is not possible
++	 * to overwrite the timestamp with an outdated value below.
++	 */
++
++	spin_lock(&ptl->pending.lock);
++
++	/* If we are canceling/completing this packet, do not add it. */
++	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) {
++		spin_unlock(&ptl->pending.lock);
++		return;
++	}
++
++	/*
++	 * On re-submission, the packet has already been added to the pending
++	 * set. We still need to update the timestamp as the packet timeout is
++	 * reset for each (re-)submission.
++	 */
++	p->timestamp = timestamp;
++
++	/* In case it is already pending (e.g. re-submission), do not add it. */
++	if (!test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &p->state)) {
++		atomic_inc(&ptl->pending.count);
++		list_add_tail(&ssh_packet_get(p)->pending_node, &ptl->pending.head);
++	}
++
++	spin_unlock(&ptl->pending.lock);
++
++	/* Arm/update timeout reaper. */
++	ssh_ptl_timeout_reaper_mod(ptl, timestamp, timestamp + timeout);
++}
++
++static void ssh_ptl_pending_remove(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	spin_lock(&ptl->pending.lock);
++
++	if (!test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
++		spin_unlock(&ptl->pending.lock);
++		return;
++	}
++
++	list_del(&packet->pending_node);
++	atomic_dec(&ptl->pending.count);
++
++	spin_unlock(&ptl->pending.lock);
++
++	ssh_packet_put(packet);
++}
++
++/* Warning: Does not check/set "completed" bit. */
++static void __ssh_ptl_complete(struct ssh_packet *p, int status)
++{
++	struct ssh_ptl *ptl = READ_ONCE(p->ptl);
++
++	ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
++
++	if (p->ops->complete)
++		p->ops->complete(p, status);
++}
++
++static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
++{
++	/*
++	 * A call to this function should in general be preceded by
++	 * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->flags) to avoid re-adding the
++	 * packet to the structures it's going to be removed from.
++	 *
++	 * The set_bit call does not need explicit memory barriers as the
++	 * implicit barrier of the test_and_set_bit() call below ensures that the
++	 * flag is visible before we actually attempt to remove the packet.
++	 */
++
++	if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++		return;
++
++	ssh_ptl_queue_remove(p);
++	ssh_ptl_pending_remove(p);
++
++	__ssh_ptl_complete(p, status);
++}
++
++static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
++		return !atomic_read(&ptl->pending.count);
++
++	/* We can always process non-blocking packets. */
++	if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
++		return true;
++
++	/* If we are already waiting for this packet, send it again. */
++	if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
++		return true;
++
++	/* Otherwise: Check if we have the capacity to send. */
++	return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
++}
++
++static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *packet = ERR_PTR(-ENOENT);
++	struct ssh_packet *p, *n;
++
++	spin_lock(&ptl->queue.lock);
++	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
++		/*
++		 * If we are canceling or completing this packet, ignore it.
++		 * It's going to be removed from this queue shortly.
++		 */
++		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		/*
++		 * Packets should be ordered non-blocking/to-be-resent first.
++		 * If we cannot process this packet, assume that we can't
++		 * process any following packet either and abort.
++		 */
++		if (!ssh_ptl_tx_can_process(p)) {
++			packet = ERR_PTR(-EBUSY);
++			break;
++		}
++
++		/*
++		 * We are allowed to change the state now. Remove it from the
++		 * queue and mark it as being transmitted.
++		 */
++
++		list_del(&p->queue_node);
++
++		set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
++		/* Ensure that state never gets zero. */
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
++
++		/*
++		 * Update number of tries. This directly influences the
++		 * priority in case the packet is re-submitted (e.g. via
++		 * timeout/NAK). Note that all reads and writes to the
++		 * priority after the first submission are guarded by the
++		 * queue lock.
++		 */
++		ssh_packet_next_try(p);
++
++		packet = p;
++		break;
++	}
++	spin_unlock(&ptl->queue.lock);
++
++	return packet;
++}
++
++static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *p;
++
++	p = ssh_ptl_tx_pop(ptl);
++	if (IS_ERR(p))
++		return p;
++
++	if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
++		ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
++		ssh_ptl_pending_push(p);
++	} else {
++		ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
++	}
++
++	return p;
++}
++
++static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
++
++	/* Transition state to "transmitted". */
++	set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
++	/* Ensure that state never gets zero. */
++	smp_mb__before_atomic();
++	clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
++
++	/* If the packet is unsequenced, we're done: Lock and complete. */
++	if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
++		set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
++		ssh_ptl_remove_and_complete(packet, 0);
++	}
++
++	/*
++	 * Notify that a packet transmission has finished. In general we're only
++	 * waiting for one packet (if any), so wake_up_all should be fine.
++	 */
++	wake_up_all(&ptl->tx.packet_wq);
++}
++
++static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
++{
++	/* Transmission failure: Lock the packet and try to complete it. */
++	set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
++	/* Ensure that state never gets zero. */
++	smp_mb__before_atomic();
++	clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
++
++	ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
++	ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);
++
++	ssh_ptl_remove_and_complete(packet, status);
++
++	/*
++	 * Notify that a packet transmission has finished. In general we're only
++	 * waiting for one packet (if any), so wake_up_all should be fine.
++	 */
++	wake_up_all(&packet->ptl->tx.packet_wq);
++}
++
++static long ssh_ptl_tx_wait_packet(struct ssh_ptl *ptl)
++{
++	int status;
++
++	status = wait_for_completion_interruptible(&ptl->tx.thread_cplt_pkt);
++	reinit_completion(&ptl->tx.thread_cplt_pkt);
++
++	/*
++	 * Ensure completion is cleared before continuing to avoid lost update
++	 * problems.
++	 */
++	smp_mb__after_atomic();
++
++	return status;
++}
++
++static long ssh_ptl_tx_wait_transfer(struct ssh_ptl *ptl, long timeout)
++{
++	long status;
++
++	status = wait_for_completion_interruptible_timeout(&ptl->tx.thread_cplt_tx,
++							   timeout);
++	reinit_completion(&ptl->tx.thread_cplt_tx);
++
++	/*
++	 * Ensure completion is cleared before continuing to avoid lost update
++	 * problems.
++	 */
++	smp_mb__after_atomic();
++
++	return status;
++}
++
++static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet)
++{
++	long timeout = SSH_PTL_TX_TIMEOUT;
++	size_t offset = 0;
++
++	/* Note: Flush-packets don't have any data. */
++	if (unlikely(!packet->data.ptr))
++		return 0;
++
++	ptl_dbg(ptl, "tx: sending data (length: %zu)\n", packet->data.len);
++	print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
++			     packet->data.ptr, packet->data.len, false);
++
++	do {
++		ssize_t status, len;
++		u8 *buf;
++
++		buf = packet->data.ptr + offset;
++		len = packet->data.len - offset;
++
++		status = serdev_device_write_buf(ptl->serdev, buf, len);
++		if (status < 0)
++			return status;
++
++		if (status == len)
++			return 0;
++
++		offset += status;
++
++		timeout = ssh_ptl_tx_wait_transfer(ptl, timeout);
++		if (kthread_should_stop() || !atomic_read(&ptl->tx.running))
++			return -ESHUTDOWN;
++
++		if (timeout < 0)
++			return -EINTR;
++
++		if (timeout == 0)
++			return -ETIMEDOUT;
++	} while (true);
++}
++
++static int ssh_ptl_tx_threadfn(void *data)
++{
++	struct ssh_ptl *ptl = data;
++
++	while (!kthread_should_stop() && atomic_read(&ptl->tx.running)) {
++		struct ssh_packet *packet;
++		int status;
++
++		/* Try to get the next packet. */
++		packet = ssh_ptl_tx_next(ptl);
++
++		/* If no packet can be processed, we are done. */
++		if (IS_ERR(packet)) {
++			ssh_ptl_tx_wait_packet(ptl);
++			continue;
++		}
++
++		/* Transfer and complete packet. */
++		status = ssh_ptl_tx_packet(ptl, packet);
++		if (status)
++			ssh_ptl_tx_compl_error(packet, status);
++		else
++			ssh_ptl_tx_compl_success(packet);
++
++		ssh_packet_put(packet);
++	}
++
++	return 0;
++}
++
++/**
++ * ssh_ptl_tx_wakeup_packet() - Wake up packet transmitter thread for new
++ * packet.
++ * @ptl: The packet transport layer.
++ *
++ * Wakes up the packet transmitter thread, notifying it that a new packet has
++ * arrived and is ready for transfer. If the packet transport layer has been
++ * shut down, calls to this function will be ignored.
++ */
++static void ssh_ptl_tx_wakeup_packet(struct ssh_ptl *ptl)
++{
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return;
++
++	complete(&ptl->tx.thread_cplt_pkt);
++}
++
++/**
++ * ssh_ptl_tx_start() - Start packet transmitter thread.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_ptl_tx_start(struct ssh_ptl *ptl)
++{
++	atomic_set_release(&ptl->tx.running, 1);
++
++	ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "ssam_serial_hub-tx");
++	if (IS_ERR(ptl->tx.thread))
++		return PTR_ERR(ptl->tx.thread);
++
++	return 0;
++}
++
++/**
++ * ssh_ptl_tx_stop() - Stop packet transmitter thread.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
++{
++	int status = 0;
++
++	if (!IS_ERR_OR_NULL(ptl->tx.thread)) {
++		/* Tell thread to stop. */
++		atomic_set_release(&ptl->tx.running, 0);
++
++		/*
++		 * Wake up thread in case it is paused. Do not use wakeup
++		 * helpers as this may be called when the shutdown bit has
++		 * already been set.
++		 */
++		complete(&ptl->tx.thread_cplt_pkt);
++		complete(&ptl->tx.thread_cplt_tx);
++
++		/* Finally, wait for thread to stop. */
++		status = kthread_stop(ptl->tx.thread);
++		ptl->tx.thread = NULL;
++	}
++
++	return status;
++}
++
++static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
++{
++	struct ssh_packet *packet = ERR_PTR(-ENOENT);
++	struct ssh_packet *p, *n;
++
++	spin_lock(&ptl->pending.lock);
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		/*
++		 * We generally expect packets to be in order, so the first
++		 * packet added to the pending set is the first to be sent and
++		 * the first to be ACKed.
++		 */
++		if (unlikely(ssh_packet_get_seq(p) != seq_id))
++			continue;
++
++		/*
++		 * We may receive an ACK while handling a transmission-error
++		 * completion. In that case, the packet will be removed shortly.
++		 */
++		if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
++			packet = ERR_PTR(-EPERM);
++			break;
++		}
++
++		/*
++		 * Mark the packet as ACKed and remove it from pending by
++		 * removing its node and decrementing the pending counter.
++		 */
++		set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
++		/* Ensure that state never gets zero. */
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		atomic_dec(&ptl->pending.count);
++		list_del(&p->pending_node);
++		packet = p;
++
++		break;
++	}
++	spin_unlock(&ptl->pending.lock);
++
++	return packet;
++}
++
++static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
++{
++	wait_event(packet->ptl->tx.packet_wq,
++		   test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state) ||
++		   test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
++}
++
++static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
++{
++	struct ssh_packet *p;
++
++	p = ssh_ptl_ack_pop(ptl, seq);
++	if (IS_ERR(p)) {
++		if (PTR_ERR(p) == -ENOENT) {
++			/*
++			 * The packet has not been found in the set of pending
++			 * packets.
++			 */
++			ptl_warn(ptl, "ptl: received ACK for non-pending packet\n");
++		} else {
++			/*
++			 * The packet is pending, but we are not allowed to take
++			 * it because it has been locked.
++			 */
++			WARN_ON(PTR_ERR(p) != -EPERM);
++		}
++		return;
++	}
++
++	ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
++
++	/*
++	 * It is possible that the packet has been transmitted, but the state
++	 * has not been updated from "transmitting" to "transmitted" yet.
++	 * In that case, we need to wait for this transition to occur in order
++	 * to distinguish between success and failure.
++	 *
++	 * On transmission failure, the packet will be locked after this call.
++	 * On success, the transmitted bit will be set.
++	 */
++	ssh_ptl_wait_until_transmitted(p);
++
++	/*
++	 * The packet will already be locked in case of a transmission error or
++	 * cancellation. Let the transmitter or cancellation issuer complete the
++	 * packet.
++	 */
++	if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
++		if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state)))
++			ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");
++
++		ssh_packet_put(p);
++		return;
++	}
++
++	ssh_ptl_remove_and_complete(p, 0);
++	ssh_packet_put(p);
++
++	if (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING)
++		ssh_ptl_tx_wakeup_packet(ptl);
++}
++
++/**
++ * ssh_ptl_submit() - Submit a packet to the transport layer.
++ * @ptl: The packet transport layer to submit the packet to.
++ * @p:   The packet to submit.
++ *
++ * Submits a new packet to the transport layer, queuing it to be sent. This
++ * function should not be used for re-submission.
++ *
++ * Return: Returns zero on success, %-EINVAL if a packet field is invalid or
++ * the packet has been canceled prior to submission, %-EALREADY if the packet
++ * has already been submitted, or %-ESHUTDOWN if the packet transport layer
++ * has been shut down.
++ */
++int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
++{
++	struct ssh_ptl *ptl_old;
++	int status;
++
++	/* Validate packet fields. */
++	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
++		if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
++			return -EINVAL;
++	} else if (!p->data.ptr) {
++		return -EINVAL;
++	}
++
++	/*
++	 * The ptl reference only gets set on or before the first submission.
++	 * After the first submission, it has to be read-only.
++	 *
++	 * Note that ptl may already be set from upper-layer request
++	 * submission, thus we cannot expect it to be NULL.
++	 */
++	ptl_old = READ_ONCE(p->ptl);
++	if (!ptl_old)
++		WRITE_ONCE(p->ptl, ptl);
++	else if (WARN_ON(ptl_old != ptl))
++		return -EALREADY;	/* Submitted on different PTL. */
++
++	status = ssh_ptl_queue_push(p);
++	if (status)
++		return status;
++
++	if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state) ||
++	    (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING))
++		ssh_ptl_tx_wakeup_packet(ptl);
++
++	return 0;
++}
++
++/*
++ * __ssh_ptl_resubmit() - Re-submit a packet to the transport layer.
++ * @packet: The packet to re-submit.
++ *
++ * Re-submits the given packet: Checks if it can be re-submitted and queues it
++ * if it can, resetting the packet timestamp in the process. Must be called
++ * with the pending lock held.
++ *
++ * Return: Returns %-ECANCELED if the packet has exceeded its number of tries,
++ * %-EINVAL if the packet has been locked, %-EALREADY if the packet is already
++ * on the queue, and %-ESHUTDOWN if the transmission layer has been shut down.
++ */
++static int __ssh_ptl_resubmit(struct ssh_packet *packet)
++{
++	int status;
++	u8 try;
++
++	lockdep_assert_held(&packet->ptl->pending.lock);
++
++	spin_lock(&packet->ptl->queue.lock);
++
++	/* Check if the packet is out of tries. */
++	try = ssh_packet_priority_get_try(packet->priority);
++	if (try >= SSH_PTL_MAX_PACKET_TRIES) {
++		spin_unlock(&packet->ptl->queue.lock);
++		return -ECANCELED;
++	}
++
++	status = __ssh_ptl_queue_push(packet);
++	if (status) {
++		/*
++		 * An error here indicates that the packet has either already
++		 * been queued, been locked, or the transport layer is being
++		 * shut down. In all cases: Ignore the error.
++		 */
++		spin_unlock(&packet->ptl->queue.lock);
++		return status;
++	}
++
++	packet->timestamp = KTIME_MAX;
++
++	spin_unlock(&packet->ptl->queue.lock);
++	return 0;
++}
++
++static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *p;
++	bool resub = false;
++
++	/*
++	 * Note: We deliberately do not remove/attempt to cancel and complete
++	 * packets that are out of tries in this function. The packet will
++	 * eventually be canceled and completed by the timeout. Removing the
++	 * packet here could lead to overly eager cancellation if the packet
++	 * has not been re-transmitted yet but the tries counter has already
++	 * been updated (i.e. ssh_ptl_tx_next() removed the packet from the
++	 * queue and updated the counter, but re-transmission for the last try
++	 * has not actually started yet).
++	 */
++
++	spin_lock(&ptl->pending.lock);
++
++	/* Re-queue all pending packets. */
++	list_for_each_entry(p, &ptl->pending.head, pending_node) {
++		/*
++		 * Re-submission fails if the packet is out of tries, has been
++		 * locked, is already queued, or the layer is being shut down.
++		 * No need to re-schedule tx-thread in those cases.
++		 */
++		if (!__ssh_ptl_resubmit(p))
++			resub = true;
++	}
++
++	spin_unlock(&ptl->pending.lock);
++
++	if (resub)
++		ssh_ptl_tx_wakeup_packet(ptl);
++}
++
++/**
++ * ssh_ptl_cancel() - Cancel a packet.
++ * @p: The packet to cancel.
++ *
++ * Cancels a packet. There are no guarantees on when completion and release
++ * callbacks will be called. This may occur during execution of this function
++ * or may occur at any point later.
++ *
++ * Note that it is not guaranteed that the packet will actually be canceled if
++ * the packet is concurrently completed by another process. The only guarantee
++ * of this function is that the packet will be completed (with success,
++ * failure, or cancellation) and released from the transport layer in a
++ * reasonable time-frame.
++ *
++ * May be called before the packet has been submitted, in which case any later
++ * packet submission fails.
++ */
++void ssh_ptl_cancel(struct ssh_packet *p)
++{
++	if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
++		return;
++
++	/*
++	 * Lock packet and commit with memory barrier. If this packet has
++	 * already been locked, it's going to be removed and completed by
++	 * another party, which should have precedence.
++	 */
++	if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++		return;
++
++	/*
++	 * By marking the packet as locked and employing the implicit memory
++	 * barrier of test_and_set_bit, we have guaranteed that, at this point,
++	 * the packet cannot be added to the queue any more.
++	 *
++	 * In case the packet has never been submitted, packet->ptl is NULL. If
++	 * the packet is currently being submitted, packet->ptl may be NULL or
++	 * non-NULL. Due to marking the packet as locked above and committing
++	 * with the memory barrier, we have guaranteed that, if packet->ptl is
++	 * NULL, the packet will never be added to the queue. If packet->ptl is
++	 * non-NULL, we don't have any guarantees.
++	 */
++
++	if (READ_ONCE(p->ptl)) {
++		ssh_ptl_remove_and_complete(p, -ECANCELED);
++
++		if (atomic_read(&p->ptl->pending.count) < SSH_PTL_MAX_PENDING)
++			ssh_ptl_tx_wakeup_packet(p->ptl);
++
++	} else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
++		__ssh_ptl_complete(p, -ECANCELED);
++	}
++}
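++
++/*
++ * Example (illustrative sketch only): an owner aborting a previously
++ * submitted packet cancels it and then drops its own reference; completion
++ * and release callbacks may run during or after the cancel call:
++ *
++ *	ssh_ptl_cancel(packet);
++ *	ssh_packet_put(packet);
++ */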
++
++/* Must be called with pending lock held */
++static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
++{
++	lockdep_assert_held(&p->ptl->pending.lock);
++
++	if (p->timestamp != KTIME_MAX)
++		return ktime_add(p->timestamp, timeout);
++	else
++		return KTIME_MAX;
++}
++
++static void ssh_ptl_timeout_reap(struct work_struct *work)
++{
++	struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
++	struct ssh_packet *p, *n;
++	LIST_HEAD(claimed);
++	ktime_t now = ktime_get_coarse_boottime();
++	ktime_t timeout = ptl->rtx_timeout.timeout;
++	ktime_t next = KTIME_MAX;
++	bool resub = false;
++	int status;
++
++	/*
++	 * Mark reaper as "not pending". This is done before checking any
++	 * packets to avoid lost-update type problems.
++	 */
++	spin_lock(&ptl->rtx_timeout.lock);
++	ptl->rtx_timeout.expires = KTIME_MAX;
++	spin_unlock(&ptl->rtx_timeout.lock);
++
++	spin_lock(&ptl->pending.lock);
++
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		ktime_t expires = ssh_packet_get_expiration(p, timeout);
++
++		/*
++		 * Check if the timeout hasn't expired yet. Find out next
++		 * expiration date to be handled after this run.
++		 */
++		if (ktime_after(expires, now)) {
++			next = ktime_before(expires, next) ? expires : next;
++			continue;
++		}
++
++		status = __ssh_ptl_resubmit(p);
++
++		/*
++		 * Re-submission fails if the packet is out of tries, has been
++		 * locked, is already queued, or the layer is being shut down.
++		 * No need to re-schedule tx-thread in those cases.
++		 */
++		if (!status)
++			resub = true;
++
++		/* Go to next packet if this packet is not out of tries. */
++		if (status != -ECANCELED)
++			continue;
++
++		/* No more tries left: Cancel the packet. */
++
++		/*
++		 * If someone else has locked the packet already, don't use it
++		 * and let the other party complete it.
++		 */
++		if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		/*
++		 * We have now marked the packet as locked. Thus it cannot be
++		 * added to the pending list again after we've removed it here.
++		 * We can therefore re-use the pending_node of this packet
++		 * temporarily.
++		 */
++
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		atomic_dec(&ptl->pending.count);
++		list_del(&p->pending_node);
++
++		list_add_tail(&p->pending_node, &claimed);
++	}
++
++	spin_unlock(&ptl->pending.lock);
++
++	/* Cancel and complete the packet. */
++	list_for_each_entry_safe(p, n, &claimed, pending_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
++			ssh_ptl_queue_remove(p);
++			__ssh_ptl_complete(p, -ETIMEDOUT);
++		}
++
++		/*
++		 * Drop the reference we've obtained by removing it from
++		 * the pending set.
++		 */
++		list_del(&p->pending_node);
++		ssh_packet_put(p);
++	}
++
++	/* Ensure that reaper doesn't run again immediately. */
++	next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
++	if (next != KTIME_MAX)
++		ssh_ptl_timeout_reaper_mod(ptl, now, next);
++
++	if (resub)
++		ssh_ptl_tx_wakeup_packet(ptl);
++}
++
++static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
++{
++	int i;
++
++	/*
++	 * Check if SEQ has been seen recently (i.e. packet was
++	 * re-transmitted and we should ignore it).
++	 */
++	for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
++		if (likely(ptl->rx.blocked.seqs[i] != seq))
++			continue;
++
++		ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
++		return true;
++	}
++
++	/* Update list of blocked sequence IDs. */
++	ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
++	ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
++				  % ARRAY_SIZE(ptl->rx.blocked.seqs);
++
++	return false;
++}
++
++static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
++				 const struct ssh_frame *frame,
++				 const struct ssam_span *payload)
++{
++	if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
++		return;
++
++	ptl->ops.data_received(ptl, payload);
++}
++
++static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
++{
++	struct ssh_packet *packet;
++	struct ssam_span buf;
++	struct msgbuf msgb;
++	int status;
++
++	status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
++	if (status) {
++		ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
++		return;
++	}
++
++	ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
++			&ssh_ptl_ctrl_packet_ops);
++
++	msgb_init(&msgb, buf.ptr, buf.len);
++	msgb_push_ack(&msgb, seq);
++	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
++
++	ssh_ptl_submit(ptl, packet);
++	ssh_packet_put(packet);
++}
++
++static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *packet;
++	struct ssam_span buf;
++	struct msgbuf msgb;
++	int status;
++
++	status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
++	if (status) {
++		ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
++		return;
++	}
++
++	ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(NAK, 0),
++			&ssh_ptl_ctrl_packet_ops);
++
++	msgb_init(&msgb, buf.ptr, buf.len);
++	msgb_push_nak(&msgb);
++	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
++
++	ssh_ptl_submit(ptl, packet);
++	ssh_packet_put(packet);
++}
++
++static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
++{
++	struct ssh_frame *frame;
++	struct ssam_span payload;
++	struct ssam_span aligned;
++	bool syn_found;
++	int status;
++
++	/* Find SYN. */
++	syn_found = sshp_find_syn(source, &aligned);
++
++	if (unlikely(aligned.ptr - source->ptr > 0)) {
++		ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
++
++		/*
++		 * Notes:
++		 * - This might send multiple NAKs in case the communication
++		 *   starts with an invalid SYN and is broken down into multiple
++		 *   pieces. This should generally be handled fine, we just
++		 *   might receive duplicate data in this case, which is
++		 *   detected when handling data frames.
++		 * - This path will also be executed on invalid CRCs: When an
++		 *   invalid CRC is encountered, the code below will skip data
++		 *   until directly after the SYN. This causes the search for
++		 *   the next SYN, which is generally not placed directly after
++		 *   the last one.
++		 *
++		 *   Open question: Should we send this in case of invalid
++		 *   payload CRCs if the frame-type is non-sequential (current
++		 *   implementation) or should we drop that frame without
++		 *   telling the EC?
++		 */
++		ssh_ptl_send_nak(ptl);
++	}
++
++	if (unlikely(!syn_found))
++		return aligned.ptr - source->ptr;
++
++	/* Parse and validate frame. */
++	status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
++				  SSH_PTL_RX_BUF_LEN);
++	if (status)	/* Invalid frame: skip to next SYN. */
++		return aligned.ptr - source->ptr + sizeof(u16);
++	if (!frame)	/* Not enough data. */
++		return aligned.ptr - source->ptr;
++
++	switch (frame->type) {
++	case SSH_FRAME_TYPE_ACK:
++		ssh_ptl_acknowledge(ptl, frame->seq);
++		break;
++
++	case SSH_FRAME_TYPE_NAK:
++		ssh_ptl_resubmit_pending(ptl);
++		break;
++
++	case SSH_FRAME_TYPE_DATA_SEQ:
++		ssh_ptl_send_ack(ptl, frame->seq);
++		fallthrough;
++
++	case SSH_FRAME_TYPE_DATA_NSQ:
++		ssh_ptl_rx_dataframe(ptl, frame, &payload);
++		break;
++
++	default:
++		ptl_warn(ptl, "ptl: received frame with unknown type %#04x\n",
++			 frame->type);
++		break;
++	}
++
++	return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(frame->len);
++}
++
++static int ssh_ptl_rx_threadfn(void *data)
++{
++	struct ssh_ptl *ptl = data;
++
++	while (true) {
++		struct ssam_span span;
++		size_t offs = 0;
++		size_t n;
++
++		wait_event_interruptible(ptl->rx.wq,
++					 !kfifo_is_empty(&ptl->rx.fifo) ||
++					 kthread_should_stop());
++		if (kthread_should_stop())
++			break;
++
++		/* Copy from fifo to evaluation buffer. */
++		n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
++
++		ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
++		print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
++				     ptl->rx.buf.ptr + ptl->rx.buf.len - n,
++				     n, false);
++
++		/* Parse until we need more bytes or buffer is empty. */
++		while (offs < ptl->rx.buf.len) {
++			sshp_buf_span_from(&ptl->rx.buf, offs, &span);
++			n = ssh_ptl_rx_eval(ptl, &span);
++			if (n == 0)
++				break;	/* Need more bytes. */
++
++			offs += n;
++		}
++
++		/* Throw away the evaluated parts. */
++		sshp_buf_drop(&ptl->rx.buf, offs);
++	}
++
++	return 0;
++}
++
++static void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
++{
++	wake_up(&ptl->rx.wq);
++}
++
++/**
++ * ssh_ptl_rx_start() - Start packet transport layer receiver thread.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_ptl_rx_start(struct ssh_ptl *ptl)
++{
++	if (ptl->rx.thread)
++		return 0;
++
++	ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
++				     "ssam_serial_hub-rx");
++	if (IS_ERR(ptl->rx.thread))
++		return PTR_ERR(ptl->rx.thread);
++
++	return 0;
++}
++
++/**
++ * ssh_ptl_rx_stop() - Stop packet transport layer receiver thread.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
++{
++	int status = 0;
++
++	if (ptl->rx.thread) {
++		status = kthread_stop(ptl->rx.thread);
++		ptl->rx.thread = NULL;
++	}
++
++	return status;
++}
++
++/**
++ * ssh_ptl_rx_rcvbuf() - Push data from lower-layer transport to the packet
++ * layer.
++ * @ptl: The packet transport layer.
++ * @buf: Pointer to the data to push to the layer.
++ * @n:   Size of the data to push to the layer, in bytes.
++ *
++ * Pushes data from a lower-layer transport to the receiver fifo buffer of the
++ * packet layer and notifies the receiver thread. Calls to this function are
++ * ignored once the packet layer has been shut down.
++ *
++ * Return: Returns the number of bytes transferred (positive or zero) on
++ * success. Returns %-ESHUTDOWN if the packet layer has been shut down.
++ */
++int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
++{
++	int used;
++
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return -ESHUTDOWN;
++
++	used = kfifo_in(&ptl->rx.fifo, buf, n);
++	if (used)
++		ssh_ptl_rx_wakeup(ptl);
++
++	return used;
++}
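++
++/*
++ * Example (illustrative sketch only): a lower-level serdev driver owning
++ * this layer would feed incoming bytes into it from its receive callback,
++ * roughly as follows (assuming the ssh_ptl instance is reachable via the
++ * serdev driver data):
++ *
++ *	static int my_receive_buf(struct serdev_device *dev,
++ *				  const unsigned char *buf, size_t n)
++ *	{
++ *		struct ssh_ptl *ptl = serdev_device_get_drvdata(dev);
++ *		int used = ssh_ptl_rx_rcvbuf(ptl, buf, n);
++ *
++ *		return used < 0 ? 0 : used;
++ *	}
++ */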
++
++/**
++ * ssh_ptl_shutdown() - Shut down the packet transport layer.
++ * @ptl: The packet transport layer.
++ *
++ * Shuts down the packet transport layer, removing and canceling all queued
++ * and pending packets. Packets canceled by this operation will be completed
++ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
++ * stopped.
++ *
++ * As a result of this function, the transport layer will be marked as shut
++ * down. Submission of packets after the transport layer has been shut down
++ * will fail with %-ESHUTDOWN.
++ */
++void ssh_ptl_shutdown(struct ssh_ptl *ptl)
++{
++	LIST_HEAD(complete_q);
++	LIST_HEAD(complete_p);
++	struct ssh_packet *p, *n;
++	int status;
++
++	/* Ensure that no new packets (including ACK/NAK) can be submitted. */
++	set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
++	/*
++	 * Ensure that the layer gets marked as shut-down before actually
++	 * stopping it. In combination with the check in ssh_ptl_queue_push(),
++	 * this guarantees that no new packets can be added and all already
++	 * queued packets are properly canceled. In combination with the check
++	 * in ssh_ptl_rx_rcvbuf(), this guarantees that received data is
++	 * properly cut off.
++	 */
++	smp_mb__after_atomic();
++
++	status = ssh_ptl_rx_stop(ptl);
++	if (status)
++		ptl_err(ptl, "ptl: failed to stop receiver thread\n");
++
++	status = ssh_ptl_tx_stop(ptl);
++	if (status)
++		ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
++
++	cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
++
++	/*
++	 * At this point, all threads have been stopped. This means that the
++	 * only references to packets from inside the system are in the queue
++	 * and pending set.
++	 *
++	 * Note: We still need locks here because someone could still be
++	 * canceling packets.
++	 *
++	 * Note 2: We can re-use queue_node (or pending_node) if we mark the
++	 * packet as locked and then remove it from the queue (or pending set,
++	 * respectively). Marking the packet as locked avoids re-queuing
++	 * (which should already be prevented by having stopped the threads...)
++	 * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
++	 * new list via other threads (e.g. cancellation).
++	 *
++	 * Note 3: There may be overlap between complete_p and complete_q.
++	 * This is handled via test_and_set_bit() on the "completed" flag
++	 * (also handles cancellation).
++	 */
++
++	/* Mark queued packets as locked and move them to complete_q. */
++	spin_lock(&ptl->queue.lock);
++	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
++		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
++		/* Ensure that state does not get zero. */
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
++
++		list_del(&p->queue_node);
++		list_add_tail(&p->queue_node, &complete_q);
++	}
++	spin_unlock(&ptl->queue.lock);
++
++	/* Mark pending packets as locked and move them to complete_p. */
++	spin_lock(&ptl->pending.lock);
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
++		/* Ensure that state does not get zero. */
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		list_del(&p->pending_node);
++		list_add_tail(&p->pending_node, &complete_p);
++	}
++	atomic_set(&ptl->pending.count, 0);
++	spin_unlock(&ptl->pending.lock);
++
++	/* Complete and drop packets on complete_q. */
++	list_for_each_entry(p, &complete_q, queue_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++			__ssh_ptl_complete(p, -ESHUTDOWN);
++
++		ssh_packet_put(p);
++	}
++
++	/* Complete and drop packets on complete_p. */
++	list_for_each_entry(p, &complete_p, pending_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++			__ssh_ptl_complete(p, -ESHUTDOWN);
++
++		ssh_packet_put(p);
++	}
++
++	/*
++	 * At this point we have guaranteed that the system doesn't reference
++	 * any packets any more.
++	 */
++}
++
++/**
++ * ssh_ptl_init() - Initialize packet transport layer.
++ * @ptl:    The packet transport layer to initialize.
++ * @serdev: The underlying serial device, i.e. the lower-level transport.
++ * @ops:    Packet layer operations.
++ *
++ * Initializes the given packet transport layer. Transmitter and receiver
++ * threads must be started separately via ssh_ptl_tx_start() and
++ * ssh_ptl_rx_start(), after the packet-layer has been initialized and the
++ * lower-level transport layer has been set up.
++ *
++ * Return: Returns zero on success and a nonzero error code on failure.
++ */
++int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
++		 struct ssh_ptl_ops *ops)
++{
++	int i, status;
++
++	ptl->serdev = serdev;
++	ptl->state = 0;
++
++	spin_lock_init(&ptl->queue.lock);
++	INIT_LIST_HEAD(&ptl->queue.head);
++
++	spin_lock_init(&ptl->pending.lock);
++	INIT_LIST_HEAD(&ptl->pending.head);
++	atomic_set_release(&ptl->pending.count, 0);
++
++	ptl->tx.thread = NULL;
++	atomic_set(&ptl->tx.running, 0);
++	init_completion(&ptl->tx.thread_cplt_pkt);
++	init_completion(&ptl->tx.thread_cplt_tx);
++	init_waitqueue_head(&ptl->tx.packet_wq);
++
++	ptl->rx.thread = NULL;
++	init_waitqueue_head(&ptl->rx.wq);
++
++	spin_lock_init(&ptl->rtx_timeout.lock);
++	ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
++	ptl->rtx_timeout.expires = KTIME_MAX;
++	INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
++
++	ptl->ops = *ops;
++
++	/* Initialize list of recent/blocked SEQs with invalid sequence IDs. */
++	for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
++		ptl->rx.blocked.seqs[i] = U16_MAX;
++	ptl->rx.blocked.offset = 0;
++
++	status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
++	if (status)
++		return status;
++
++	status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
++	if (status)
++		kfifo_free(&ptl->rx.fifo);
++
++	return status;
++}
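++
++/*
++ * Example (illustrative sketch only): once the lower-level transport has
++ * been set up, an owner brings the layer up roughly as follows and tears it
++ * down again via ssh_ptl_shutdown() and ssh_ptl_destroy():
++ *
++ *	status = ssh_ptl_init(ptl, serdev, &ops);
++ *	if (!status)
++ *		status = ssh_ptl_tx_start(ptl);
++ *	if (!status)
++ *		status = ssh_ptl_rx_start(ptl);
++ */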
++
++/**
++ * ssh_ptl_destroy() - Deinitialize packet transport layer.
++ * @ptl: The packet transport layer to deinitialize.
++ *
++ * Deinitializes the given packet transport layer and frees resources
++ * associated with it. If receiver and/or transmitter threads have been
++ * started, the layer must first be shut down via ssh_ptl_shutdown() before
++ * this function can be called.
++ */
++void ssh_ptl_destroy(struct ssh_ptl *ptl)
++{
++	kfifo_free(&ptl->rx.fifo);
++	sshp_buf_free(&ptl->rx.buf);
++}
+diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
+new file mode 100644
+index 000000000000..058f111292ca
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
+@@ -0,0 +1,187 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * SSH packet transport layer.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
++#define _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
++
++#include <linux/atomic.h>
++#include <linux/kfifo.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/serdev.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++#include "ssh_parser.h"
++
++/**
++ * enum ssh_ptl_state_flags - State-flags for &struct ssh_ptl.
++ *
++ * @SSH_PTL_SF_SHUTDOWN_BIT:
++ *	Indicates that the packet transport layer has been shut down or is
++ *	being shut down and should not accept any new packets/data.
++ */
++enum ssh_ptl_state_flags {
++	SSH_PTL_SF_SHUTDOWN_BIT,
++};
++
++/**
++ * struct ssh_ptl_ops - Callback operations for packet transport layer.
++ * @data_received: Function called when a data packet has been received. Both
++ *                 the packet layer on which the packet has been received and
++ *                 the packet's payload data are provided to this function.
++ */
++struct ssh_ptl_ops {
++	void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
++};
++
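++/*
++ * Example (illustrative sketch only; the my_layer_* names are placeholders):
++ * an upper layer provides its callback by filling in this structure and
++ * handing it to ssh_ptl_init():
++ *
++ *	static void my_layer_data_received(struct ssh_ptl *p,
++ *					   const struct ssam_span *data)
++ *	{
++ *		... forward the frame payload to the next layer ...
++ *	}
++ *
++ *	struct ssh_ptl_ops my_layer_ops = {
++ *		.data_received = my_layer_data_received,
++ *	};
++ */
++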
++/**
++ * struct ssh_ptl - SSH packet transport layer.
++ * @serdev:        Serial device providing the underlying data transport.
++ * @state:         State(-flags) of the transport layer.
++ * @queue:         Packet submission queue.
++ * @queue.lock:    Lock for modifying the packet submission queue.
++ * @queue.head:    List-head of the packet submission queue.
++ * @pending:       Set/list of pending packets.
++ * @pending.lock:  Lock for modifying the pending set.
++ * @pending.head:  List-head of the pending set/list.
++ * @pending.count: Number of currently pending packets.
++ * @tx:            Transmitter subsystem.
++ * @tx.running:    Flag indicating (desired) transmitter thread state.
++ * @tx.thread:     Transmitter thread.
++ * @tx.thread_cplt_tx:  Completion for transmitter thread waiting on transfer.
++ * @tx.thread_cplt_pkt: Completion for transmitter thread waiting on packets.
++ * @tx.packet_wq:  Waitqueue-head for packet transmit completion.
++ * @rx:            Receiver subsystem.
++ * @rx.thread:     Receiver thread.
++ * @rx.wq:         Waitqueue-head for receiver thread.
++ * @rx.fifo:       Buffer for receiving data/pushing data to receiver thread.
++ * @rx.buf:        Buffer for evaluating data on receiver thread.
++ * @rx.blocked:    List of recent/blocked sequence IDs to detect retransmission.
++ * @rx.blocked.seqs:   Array of blocked sequence IDs.
++ * @rx.blocked.offset: Offset indicating where a new ID should be inserted.
++ * @rtx_timeout:   Retransmission timeout subsystem.
++ * @rtx_timeout.lock:    Lock for modifying the retransmission timeout reaper.
++ * @rtx_timeout.timeout: Timeout interval for retransmission.
++ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
++ * @rtx_timeout.reaper:  Work performing timeout checks and subsequent actions.
++ * @ops:           Packet layer operations.
++ */
++struct ssh_ptl {
++	struct serdev_device *serdev;
++	unsigned long state;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++	} queue;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++		atomic_t count;
++	} pending;
++
++	struct {
++		atomic_t running;
++		struct task_struct *thread;
++		struct completion thread_cplt_tx;
++		struct completion thread_cplt_pkt;
++		struct wait_queue_head packet_wq;
++	} tx;
++
++	struct {
++		struct task_struct *thread;
++		struct wait_queue_head wq;
++		struct kfifo fifo;
++		struct sshp_buf buf;
++
++		struct {
++			u16 seqs[8];
++			u16 offset;
++		} blocked;
++	} rx;
++
++	struct {
++		spinlock_t lock;
++		ktime_t timeout;
++		ktime_t expires;
++		struct delayed_work reaper;
++	} rtx_timeout;
++
++	struct ssh_ptl_ops ops;
++};
++
++#define __ssam_prcond(func, p, fmt, ...)		\
++	do {						\
++		typeof(p) __p = (p);			\
++							\
++		if (__p)				\
++			func(__p, fmt, ##__VA_ARGS__);	\
++	} while (0)
++
++#define ptl_dbg(p, fmt, ...)  dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_err(p, fmt, ...)  dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
++
++#define to_ssh_ptl(ptr, member) \
++	container_of(ptr, struct ssh_ptl, member)
++
++int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
++		 struct ssh_ptl_ops *ops);
++
++void ssh_ptl_destroy(struct ssh_ptl *ptl);
++
++/**
++ * ssh_ptl_get_device() - Get device associated with packet transport layer.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns the device that the given packet transport layer builds
++ * upon.
++ */
++static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
++{
++	return ptl->serdev ? &ptl->serdev->dev : NULL;
++}
++
++int ssh_ptl_tx_start(struct ssh_ptl *ptl);
++int ssh_ptl_tx_stop(struct ssh_ptl *ptl);
++int ssh_ptl_rx_start(struct ssh_ptl *ptl);
++int ssh_ptl_rx_stop(struct ssh_ptl *ptl);
++void ssh_ptl_shutdown(struct ssh_ptl *ptl);
++
++int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p);
++void ssh_ptl_cancel(struct ssh_packet *p);
++
++int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
++
++/**
++ * ssh_ptl_tx_wakeup_transfer() - Wake up packet transmitter thread for
++ * transfer.
++ * @ptl: The packet transport layer.
++ *
++ * Wakes up the packet transmitter thread, notifying it that the underlying
++ * transport has more space for data to be transmitted. If the packet
++ * transport layer has been shut down, calls to this function will be ignored.
++ */
++static inline void ssh_ptl_tx_wakeup_transfer(struct ssh_ptl *ptl)
++{
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return;
++
++	complete(&ptl->tx.thread_cplt_tx);
++}
++
++void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
++		     u8 priority, const struct ssh_packet_ops *ops);
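++
++/*
++ * Illustrative usage sketch: a minimal consumer of this layer wires up the
++ * receive callback, starts the threads, and submits packets roughly as
++ * follows. my_data_received and my_packet_ops are hypothetical caller-provided
++ * implementations, and the packet's data.ptr/data.len must already point to a
++ * fully serialized SSH message.
++ *
++ *	struct ssh_ptl_ops ops = { .data_received = my_data_received };
++ *
++ *	ssh_ptl_init(ptl, serdev, &ops);
++ *	ssh_ptl_tx_start(ptl);
++ *	ssh_ptl_rx_start(ptl);
++ *
++ *	ssh_packet_init(p, BIT(SSH_PACKET_TY_SEQUENCED_BIT),
++ *			SSH_PACKET_PRIORITY(DATA, 0), &my_packet_ops);
++ *	ssh_ptl_submit(ptl, p);			// transmit
++ *
++ *	ssh_ptl_shutdown(ptl);			// on teardown
++ *	ssh_ptl_destroy(ptl);
++ */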
++
++#endif /* _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H */
+diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c
+new file mode 100644
+index 000000000000..e2dead8de94a
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/ssh_parser.c
+@@ -0,0 +1,228 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * SSH message parser.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/compiler.h>
++#include <linux/device.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++#include "ssh_parser.h"
++
++/**
++ * sshp_validate_crc() - Validate a CRC in raw message data.
++ * @src: The span of data over which the CRC should be computed.
++ * @crc: The pointer to the expected u16 CRC value.
++ *
++ * Computes the CRC of the provided data span (@src), compares it to the CRC
++ * stored at the given address (@crc), and returns the result of this
++ * comparison, i.e. %true if equal. This function is intended to run on raw
++ * input/message data.
++ *
++ * Return: Returns %true if the computed CRC matches the stored CRC, %false
++ * otherwise.
++ */
++static bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
++{
++	u16 actual = ssh_crc(src->ptr, src->len);
++	u16 expected = get_unaligned_le16(crc);
++
++	return actual == expected;
++}
++
++/**
++ * sshp_starts_with_syn() - Check if the given data starts with SSH SYN bytes.
++ * @src: The data span to check the start of.
++ */
++static bool sshp_starts_with_syn(const struct ssam_span *src)
++{
++	return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
++}
++
++/**
++ * sshp_find_syn() - Find SSH SYN bytes in the given data span.
++ * @src: The data span to search in.
++ * @rem: The span (output) indicating the remaining data, starting with SSH
++ *       SYN bytes, if found.
++ *
++ * Search for SSH SYN bytes in the given source span. If found, set the @rem
++ * span to the remaining data, starting with the first SYN bytes and capped by
++ * the source span length, and return %true. This function does not copy any
++ * data, but rather only sets pointers to the respective start addresses and
++ * length values.
++ *
++ * If no SSH SYN bytes could be found, set the @rem span to the zero-length
++ * span at the end of the source span and return %false.
++ *
++ * If partial SSH SYN bytes could be found at the end of the source span, set
++ * the @rem span to cover these partial SYN bytes, capped by the end of the
++ * source span, and return %false. This function should then be re-run once
++ * more data is available.
++ *
++ * Return: Returns %true if a complete SSH SYN sequence could be found,
++ * %false otherwise.
++ */
++bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
++{
++	size_t i;
++
++	for (i = 0; i < src->len - 1; i++) {
++		if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
++			rem->ptr = src->ptr + i;
++			rem->len = src->len - i;
++			return true;
++		}
++	}
++
++	if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
++		rem->ptr = src->ptr + src->len - 1;
++		rem->len = 1;
++		return false;
++	}
++
++	rem->ptr = src->ptr + src->len;
++	rem->len = 0;
++	return false;
++}
++
++/**
++ * sshp_parse_frame() - Parse SSH frame.
++ * @dev: The device used for logging.
++ * @source: The source to parse from.
++ * @frame: The parsed frame (output).
++ * @payload: The parsed payload (output).
++ * @maxlen: The maximum supported message length.
++ *
++ * Parses and validates a SSH frame, including its payload, from the given
++ * source. Sets the provided @frame pointer to the start of the frame and
++ * writes the limits of the frame payload to the provided @payload span
++ * pointer.
++ *
++ * This function does not copy any data, but rather only validates the message
++ * data and sets pointers (and length values) to indicate the respective parts.
++ *
++ * If no complete SSH frame could be found, the frame pointer will be set to
++ * the %NULL pointer and the payload span will be set to the null span (start
++ * pointer %NULL, size zero).
++ *
++ * Return: Returns zero on success or if the frame is incomplete, %-ENOMSG if
++ * the start of the message is invalid, %-EBADMSG if any (frame-header or
++ * payload) CRC is invalid, or %-EMSGSIZE if the SSH message is bigger than
++ * the maximum message length specified in the @maxlen parameter.
++ */
++int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
++		     struct ssh_frame **frame, struct ssam_span *payload,
++		     size_t maxlen)
++{
++	struct ssam_span sf;
++	struct ssam_span sp;
++
++	/* Initialize output. */
++	*frame = NULL;
++	payload->ptr = NULL;
++	payload->len = 0;
++
++	if (!sshp_starts_with_syn(source)) {
++		dev_warn(dev, "rx: parser: invalid start of frame\n");
++		return -ENOMSG;
++	}
++
++	/* Check for minimum packet length. */
++	if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
++		dev_dbg(dev, "rx: parser: not enough data for frame\n");
++		return 0;
++	}
++
++	/* Pin down frame. */
++	sf.ptr = source->ptr + sizeof(u16);
++	sf.len = sizeof(struct ssh_frame);
++
++	/* Validate frame CRC. */
++	if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
++		dev_warn(dev, "rx: parser: invalid frame CRC\n");
++		return -EBADMSG;
++	}
++
++	/* Ensure packet does not exceed maximum length. */
++	sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
++	if (unlikely(SSH_MESSAGE_LENGTH(sp.len) > maxlen)) {
++		dev_warn(dev, "rx: parser: frame too large: %llu bytes\n",
++			 SSH_MESSAGE_LENGTH(sp.len));
++		return -EMSGSIZE;
++	}
++
++	/* Pin down payload. */
++	sp.ptr = sf.ptr + sf.len + sizeof(u16);
++
++	/* Check for frame + payload length. */
++	if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
++		dev_dbg(dev, "rx: parser: not enough data for payload\n");
++		return 0;
++	}
++
++	/* Validate payload CRC. */
++	if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
++		dev_warn(dev, "rx: parser: invalid payload CRC\n");
++		return -EBADMSG;
++	}
++
++	*frame = (struct ssh_frame *)sf.ptr;
++	*payload = sp;
++
++	dev_dbg(dev, "rx: parser: valid frame found (type: %#04x, len: %u)\n",
++		(*frame)->type, (*frame)->len);
++
++	return 0;
++}
++
++/**
++ * sshp_parse_command() - Parse SSH command frame payload.
++ * @dev: The device used for logging.
++ * @source: The source to parse from.
++ * @command: The parsed command (output).
++ * @command_data: The parsed command data/payload (output).
++ *
++ * Parses and validates a SSH command frame payload. Sets the @command pointer
++ * to the command header and the @command_data span to the command data (i.e.
++ * payload of the command). This will result in a zero-length span if the
++ * command does not have any associated data/payload. This function does not
++ * check the frame-payload-type field, which should be checked by the caller
++ * before calling this function.
++ *
++ * The @source parameter should be the complete frame payload, e.g. as
++ * returned by sshp_parse_frame().
++ *
++ * This function does not copy any data, but rather only validates the frame
++ * payload data and sets pointers (and length values) to indicate the
++ * respective parts.
++ *
++ * Return: Returns zero on success or %-ENOMSG if @source does not represent a
++ * valid command-type frame payload, i.e. is too short.
++ */
++int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
++		       struct ssh_command **command,
++		       struct ssam_span *command_data)
++{
++	/* Check for minimum length. */
++	if (unlikely(source->len < sizeof(struct ssh_command))) {
++		*command = NULL;
++		command_data->ptr = NULL;
++		command_data->len = 0;
++
++		dev_err(dev, "rx: parser: command payload is too short\n");
++		return -ENOMSG;
++	}
++
++	*command = (struct ssh_command *)source->ptr;
++	command_data->ptr = source->ptr + sizeof(struct ssh_command);
++	command_data->len = source->len - sizeof(struct ssh_command);
++
++	dev_dbg(dev, "rx: parser: valid command found (tc: %#04x, cid: %#04x)\n",
++		(*command)->tc, (*command)->cid);
++
++	return 0;
++}
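++
++/*
++ * Illustrative usage sketch: a receive path can combine the helpers above
++ * roughly as follows. Buffer management and frame-type dispatch are omitted;
++ * handle_command() and max_len are hypothetical placeholders supplied by the
++ * caller.
++ *
++ *	struct ssam_span rem, payload, cmd_data;
++ *	struct ssh_frame *frame;
++ *	struct ssh_command *cmd;
++ *
++ *	if (!sshp_find_syn(source, &rem))
++ *		return;		// no complete SYN yet, wait for more data
++ *
++ *	if (sshp_parse_frame(dev, &rem, &frame, &payload, max_len))
++ *		return;		// invalid message, drop data and re-sync
++ *	if (!frame)
++ *		return;		// frame incomplete, wait for more data
++ *
++ *	if (payload.len && payload.ptr[0] == SSH_PLD_TYPE_CMD &&
++ *	    !sshp_parse_command(dev, &payload, &cmd, &cmd_data))
++ *		handle_command(cmd, &cmd_data);
++ */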
+diff --git a/drivers/platform/surface/aggregator/ssh_parser.h b/drivers/platform/surface/aggregator/ssh_parser.h
+new file mode 100644
+index 000000000000..63c38d350988
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/ssh_parser.h
+@@ -0,0 +1,154 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * SSH message parser.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
++#define _SURFACE_AGGREGATOR_SSH_PARSER_H
++
++#include <linux/device.h>
++#include <linux/kfifo.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++
++/**
++ * struct sshp_buf - Parser buffer for SSH messages.
++ * @ptr: Pointer to the beginning of the buffer.
++ * @len: Number of bytes used in the buffer.
++ * @cap: Maximum capacity of the buffer.
++ */
++struct sshp_buf {
++	u8    *ptr;
++	size_t len;
++	size_t cap;
++};
++
++/**
++ * sshp_buf_init() - Initialize a SSH parser buffer.
++ * @buf: The buffer to initialize.
++ * @ptr: The memory backing the buffer.
++ * @cap: The length of the memory backing the buffer, i.e. its capacity.
++ *
++ * Initializes the buffer with the given memory as backing and sets its used
++ * length to zero.
++ */
++static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
++{
++	buf->ptr = ptr;
++	buf->len = 0;
++	buf->cap = cap;
++}
++
++/**
++ * sshp_buf_alloc() - Allocate and initialize a SSH parser buffer.
++ * @buf:   The buffer to initialize/allocate to.
++ * @cap:   The desired capacity of the buffer.
++ * @flags: The flags used for allocating the memory.
++ *
++ * Allocates @cap bytes and initializes the provided buffer struct with the
++ * allocated memory.
++ *
++ * Return: Returns zero on success and %-ENOMEM if allocation failed.
++ */
++static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
++{
++	u8 *ptr;
++
++	ptr = kzalloc(cap, flags);
++	if (!ptr)
++		return -ENOMEM;
++
++	sshp_buf_init(buf, ptr, cap);
++	return 0;
++}
++
++/**
++ * sshp_buf_free() - Free a SSH parser buffer.
++ * @buf: The buffer to free.
++ *
++ * Frees a SSH parser buffer by freeing the memory backing it and then
++ * resetting its pointer to %NULL and length and capacity to zero. Intended to
++ * free a buffer previously allocated with sshp_buf_alloc().
++ */
++static inline void sshp_buf_free(struct sshp_buf *buf)
++{
++	kfree(buf->ptr);
++	buf->ptr = NULL;
++	buf->len = 0;
++	buf->cap = 0;
++}
++
++/**
++ * sshp_buf_drop() - Drop data from the beginning of the buffer.
++ * @buf: The buffer to drop data from.
++ * @n:   The number of bytes to drop.
++ *
++ * Drops the first @n bytes from the buffer. Re-aligns any remaining data to
++ * the beginning of the buffer.
++ */
++static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
++{
++	memmove(buf->ptr, buf->ptr + n, buf->len - n);
++	buf->len -= n;
++}
++
++/**
++ * sshp_buf_read_from_fifo() - Transfer data from a fifo to the buffer.
++ * @buf:  The buffer to write the data into.
++ * @fifo: The fifo to read the data from.
++ *
++ * Transfers the data contained in the fifo to the buffer, removing it from
++ * the fifo. This function will try to transfer as much data as possible,
++ * limited either by the remaining space in the buffer or by the number of
++ * bytes available in the fifo.
++ *
++ * Return: Returns the number of bytes transferred.
++ */
++static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
++					     struct kfifo *fifo)
++{
++	size_t n;
++
++	n =  kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
++	buf->len += n;
++
++	return n;
++}
++
++/**
++ * sshp_buf_span_from() - Initialize a span from the given buffer and offset.
++ * @buf:    The buffer to create the span from.
++ * @offset: The offset in the buffer at which the span should start.
++ * @span:   The span to initialize (output).
++ *
++ * Initializes the provided span to point to the memory at the given offset in
++ * the buffer, with the length of the span being capped by the number of bytes
++ * used in the buffer after the offset (i.e. bytes remaining after the
++ * offset).
++ *
++ * Warning: This function does not validate that @offset is less than or equal
++ * to the number of bytes used in the buffer or the buffer capacity. This must
++ * be guaranteed by the caller.
++ */
++static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
++				      struct ssam_span *span)
++{
++	span->ptr = buf->ptr + offset;
++	span->len = buf->len - offset;
++}
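++
++/*
++ * Illustrative usage sketch: the typical receiver-side lifecycle of a parser
++ * buffer. rx_fifo, rx_buf_len, and the number of consumed bytes are
++ * placeholders provided by the surrounding receiver implementation.
++ *
++ *	struct sshp_buf buf;
++ *	struct ssam_span span;
++ *
++ *	if (sshp_buf_alloc(&buf, rx_buf_len, GFP_KERNEL))
++ *		return -ENOMEM;
++ *
++ *	sshp_buf_read_from_fifo(&buf, rx_fifo);	// pull pending bytes
++ *	sshp_buf_span_from(&buf, 0, &span);	// view buffered data
++ *	// ... parse span, yielding the number of consumed bytes ...
++ *	sshp_buf_drop(&buf, consumed);		// discard parsed bytes
++ *
++ *	sshp_buf_free(&buf);			// on teardown
++ */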
++
++bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem);
++
++int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
++		     struct ssh_frame **frame, struct ssam_span *payload,
++		     size_t maxlen);
++
++int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
++		       struct ssh_command **command,
++		       struct ssam_span *command_data);
++
++#endif /* _SURFACE_AGGREGATOR_SSH_PARSER_H */
+diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
+new file mode 100644
+index 000000000000..66c839a995f3
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
+@@ -0,0 +1,1211 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * SSH request transport layer.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/atomic.h>
++#include <linux/completion.h>
++#include <linux/ktime.h>
++#include <linux/limits.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++#include <linux/surface_aggregator/controller.h>
++
++#include "ssh_packet_layer.h"
++#include "ssh_request_layer.h"
++
++/*
++ * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
++ *
++ * Timeout as ktime_t delta for request responses. If we have not received a
++ * response in this time-frame after finishing the underlying packet
++ * transmission, the request will be completed with %-ETIMEDOUT as status
++ * code.
++ */
++#define SSH_RTL_REQUEST_TIMEOUT			ms_to_ktime(3000)
++
++/*
++ * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
++ *
++ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
++ * direct re-scheduling of reaper work_struct.
++ */
++#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION	ms_to_ktime(max(2000 / HZ, 50))
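++/*
++ * For example (integer arithmetic, values in milliseconds): 2000 / HZ spans
++ * two jiffies, so with HZ=250 this evaluates to ms_to_ktime(max(8, 50)),
++ * i.e. 50 ms, comfortably above one jiffy.
++ */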
++
++/*
++ * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
++ *
++ * Maximum number of requests concurrently waiting to be completed (i.e.
++ * waiting for the corresponding packet transmission to finish if they don't
++ * have a response or waiting for a response if they have one).
++ */
++#define SSH_RTL_MAX_PENDING		3
++
++/*
++ * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
++ * Used to prevent livelocking of the workqueue. Value chosen via educated
++ * guess, may be adjusted.
++ */
++#define SSH_RTL_TX_BATCH		10
++
++static u16 ssh_request_get_rqid(struct ssh_request *rqst)
++{
++	return get_unaligned_le16(rqst->packet.data.ptr
++				  + SSH_MSGOFFSET_COMMAND(rqid));
++}
++
++static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
++{
++	if (!rqst->packet.data.ptr)
++		return U32_MAX;
++
++	return ssh_request_get_rqid(rqst);
++}
++
++static void ssh_rtl_queue_remove(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	spin_lock(&rtl->queue.lock);
++
++	if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
++		spin_unlock(&rtl->queue.lock);
++		return;
++	}
++
++	list_del(&rqst->node);
++
++	spin_unlock(&rtl->queue.lock);
++	ssh_request_put(rqst);
++}
++
++static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
++{
++	bool empty;
++
++	spin_lock(&rtl->queue.lock);
++	empty = list_empty(&rtl->queue.head);
++	spin_unlock(&rtl->queue.lock);
++
++	return empty;
++}
++
++static void ssh_rtl_pending_remove(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	spin_lock(&rtl->pending.lock);
++
++	if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
++		spin_unlock(&rtl->pending.lock);
++		return;
++	}
++
++	atomic_dec(&rtl->pending.count);
++	list_del(&rqst->node);
++
++	spin_unlock(&rtl->pending.lock);
++
++	ssh_request_put(rqst);
++}
++
++static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	spin_lock(&rtl->pending.lock);
++
++	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
++		spin_unlock(&rtl->pending.lock);
++		return -EINVAL;
++	}
++
++	if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
++		spin_unlock(&rtl->pending.lock);
++		return -EALREADY;
++	}
++
++	atomic_inc(&rtl->pending.count);
++	list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
++
++	spin_unlock(&rtl->pending.lock);
++	return 0;
++}
++
++static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	/* rtl/ptl may not be set if we're canceling before submitting. */
++	rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
++		     ssh_request_get_rqid_safe(rqst), status);
++
++	rqst->ops->complete(rqst, NULL, NULL, status);
++}
++
++static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
++				      const struct ssh_command *cmd,
++				      const struct ssam_span *data)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
++		ssh_request_get_rqid(rqst));
++
++	rqst->ops->complete(rqst, cmd, data, 0);
++}
++
++static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
++		return !atomic_read(&rtl->pending.count);
++
++	return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
++}
++
++static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
++{
++	struct ssh_request *rqst = ERR_PTR(-ENOENT);
++	struct ssh_request *p, *n;
++
++	spin_lock(&rtl->queue.lock);
++
++	/* Find first non-locked request and remove it. */
++	list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
++		if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
++			continue;
++
++		if (!ssh_rtl_tx_can_process(p)) {
++			rqst = ERR_PTR(-EBUSY);
++			break;
++		}
++
++		/* Remove from queue and mark as transmitting. */
++		set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
++		/* Ensure state never gets zero. */
++		smp_mb__before_atomic();
++		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
++
++		list_del(&p->node);
++
++		rqst = p;
++		break;
++	}
++
++	spin_unlock(&rtl->queue.lock);
++	return rqst;
++}
++
++static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
++{
++	struct ssh_request *rqst;
++	int status;
++
++	/* Get and prepare next request for transmit. */
++	rqst = ssh_rtl_tx_next(rtl);
++	if (IS_ERR(rqst))
++		return PTR_ERR(rqst);
++
++	/* Add it to/mark it as pending. */
++	status = ssh_rtl_tx_pending_push(rqst);
++	if (status) {
++		ssh_request_put(rqst);
++		return -EAGAIN;
++	}
++
++	/* Submit packet. */
++	status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
++	if (status == -ESHUTDOWN) {
++		/*
++		 * Packet has been refused due to the packet layer shutting
++		 * down. Complete it here.
++		 */
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
++		/*
++		 * Note: A barrier is not required here, as there are only two
++		 * references in the system at this point: The one that we have,
++		 * and the other one that belongs to the pending set. Due to the
++		 * request being marked as "transmitting", our process is the
++		 * only one allowed to remove the pending node and change the
++		 * state. Normally, the task would fall to the packet callback,
++		 * but as this is a path where submission failed, this callback
++		 * will never be executed.
++		 */
++
++		ssh_rtl_pending_remove(rqst);
++		ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
++
++		ssh_request_put(rqst);
++		return -ESHUTDOWN;
++
++	} else if (status) {
++		/*
++		 * If submitting the packet failed and the packet layer isn't
++		 * shutting down, the packet has either been submitted/queued
++		 * before (-EALREADY, which cannot happen as we have
++		 * guaranteed that requests cannot be re-submitted), or the
++		 * packet was marked as locked (-EINVAL). To mark the packet
++		 * locked at this stage, the request, and thus the packet
++		 * itself, had to have been canceled. Simply drop the
++		 * reference. Cancellation itself will remove it from the set
++		 * of pending requests.
++		 */
++
++		WARN_ON(status != -EINVAL);
++
++		ssh_request_put(rqst);
++		return -EAGAIN;
++	}
++
++	ssh_request_put(rqst);
++	return 0;
++}
++
++static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
++{
++	if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
++		return false;
++
++	if (ssh_rtl_queue_empty(rtl))
++		return false;
++
++	return schedule_work(&rtl->tx.work);
++}
++
++static void ssh_rtl_tx_work_fn(struct work_struct *work)
++{
++	struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
++	unsigned int iterations = SSH_RTL_TX_BATCH;
++	int status;
++
++	/*
++	 * Try to be nice and not block/live-lock the workqueue: Run a maximum
++	 * of 10 tries, then re-submit if necessary. This should not be
++	 * necessary for normal execution, but guarantee it anyway.
++	 */
++	do {
++		status = ssh_rtl_tx_try_process_one(rtl);
++		if (status == -ENOENT || status == -EBUSY)
++			return;		/* No more requests to process. */
++
++		if (status == -ESHUTDOWN) {
++			/*
++			 * Packet system shutting down. No new packets can be
++			 * transmitted. Return silently, the party initiating
++			 * the shutdown should handle the rest.
++			 */
++			return;
++		}
++
++		WARN_ON(status != 0 && status != -EAGAIN);
++	} while (--iterations);
++
++	/* Out of tries, reschedule. */
++	ssh_rtl_tx_schedule(rtl);
++}
++
++/**
++ * ssh_rtl_submit() - Submit a request to the transport layer.
++ * @rtl:  The request transport layer.
++ * @rqst: The request to submit.
++ *
++ * Submits a request to the transport layer. A single request may not be
++ * submitted multiple times without reinitializing it.
++ *
++ * Return: Returns zero on success, %-EINVAL if the request type is invalid or
++ * the request has been canceled prior to submission, %-EALREADY if the
++ * request has already been submitted, or %-ESHUTDOWN in case the request
++ * transport layer has been shut down.
++ */
++int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
++{
++	/*
++	 * Ensure that requests expecting a response are sequenced. If this
++	 * invariant ever changes, see the comment in ssh_rtl_complete() on what
++	 * is required to be changed in the code.
++	 */
++	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
++		if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
++			return -EINVAL;
++
++	spin_lock(&rtl->queue.lock);
++
++	/*
++	 * Try to set ptl and check if this request has already been submitted.
++	 *
++	 * Must be inside lock as we might run into a lost update problem
++	 * otherwise: If this were outside of the lock, cancellation in
++	 * ssh_rtl_cancel_nonpending() may run after we've set the ptl
++	 * reference but before we enter the lock. In that case, we'd detect
++	 * that the request is being added to the queue and would try to remove
++	 * it from that, but removal might fail because it hasn't actually been
++	 * added yet. By putting this cmpxchg in the critical section, we
++	 * ensure that the queuing detection only triggers when we are already
++	 * in the critical section and the remove process will wait until the
++	 * push operation has been completed (via lock) due to that. Only then,
++	 * we can safely try to remove it.
++	 */
++	if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
++		spin_unlock(&rtl->queue.lock);
++		return -EALREADY;
++	}
++
++	/*
++	 * Ensure that we set ptl reference before we continue modifying state.
++	 * This is required for non-pending cancellation. This barrier is paired
++	 * with the one in ssh_rtl_cancel_nonpending().
++	 *
++	 * By setting the ptl reference before we test for "locked", we can
++	 * check if the "locked" test may have already run. See comments in
++	 * ssh_rtl_cancel_nonpending() for more detail.
++	 */
++	smp_mb__after_atomic();
++
++	if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
++		spin_unlock(&rtl->queue.lock);
++		return -ESHUTDOWN;
++	}
++
++	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
++		spin_unlock(&rtl->queue.lock);
++		return -EINVAL;
++	}
++
++	set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
++	list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
++
++	spin_unlock(&rtl->queue.lock);
++
++	ssh_rtl_tx_schedule(rtl);
++	return 0;
++}
++
++static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
++				       ktime_t expires)
++{
++	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
++	ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
++
++	spin_lock(&rtl->rtx_timeout.lock);
++
++	/* Re-adjust / schedule reaper only if it is above resolution delta. */
++	if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
++		rtl->rtx_timeout.expires = expires;
++		mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
++	}
++
++	spin_unlock(&rtl->rtx_timeout.lock);
++}
++
++static void ssh_rtl_timeout_start(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++	ktime_t timestamp = ktime_get_coarse_boottime();
++	ktime_t timeout = rtl->rtx_timeout.timeout;
++
++	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
++		return;
++
++	/*
++	 * Note: The timestamp gets set only once. This happens on the packet
++	 * callback. All other access to it is read-only.
++	 */
++	WRITE_ONCE(rqst->timestamp, timestamp);
++	/*
++	 * Ensure timestamp is set before starting the reaper. Paired with
++	 * implicit barrier following check on ssh_request_get_expiration() in
++	 * ssh_rtl_timeout_reap.
++	 * ssh_rtl_timeout_reap().
++	smp_mb__after_atomic();
++
++	ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
++}
++
++static void ssh_rtl_complete(struct ssh_rtl *rtl,
++			     const struct ssh_command *command,
++			     const struct ssam_span *command_data)
++{
++	struct ssh_request *r = NULL;
++	struct ssh_request *p, *n;
++	u16 rqid = get_unaligned_le16(&command->rqid);
++
++	/*
++	 * Get request from pending based on request ID and mark it as response
++	 * received and locked.
++	 */
++	spin_lock(&rtl->pending.lock);
++	list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
++		/* We generally expect requests to be processed in order. */
++		if (unlikely(ssh_request_get_rqid(p) != rqid))
++			continue;
++
++		/*
++		 * Mark as "response received" and "locked" as we're going to
++		 * complete it.
++		 */
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
++		set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
++		/* Ensure state never gets zero. */
++		smp_mb__before_atomic();
++		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
++
++		atomic_dec(&rtl->pending.count);
++		list_del(&p->node);
++
++		r = p;
++		break;
++	}
++	spin_unlock(&rtl->pending.lock);
++
++	if (!r) {
++		rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
++			 rqid);
++		return;
++	}
++
++	/* If the request hasn't been completed yet, we will do this now. */
++	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
++		ssh_request_put(r);
++		ssh_rtl_tx_schedule(rtl);
++		return;
++	}
++
++	/*
++	 * Make sure the request has been transmitted. In case of a sequenced
++	 * request, we are guaranteed that the completion callback will run on
++	 * the receiver thread directly when the ACK for the packet has been
++	 * received. Similarly, this function is guaranteed to run on the
++	 * receiver thread. Thus we are guaranteed that if the packet has been
++	 * successfully transmitted and received an ACK, the transmitted flag
++	 * has been set and is visible here.
++	 *
++	 * We are currently not handling unsequenced packets here, as those
++	 * should never expect a response as ensured in ssh_rtl_submit. If this
++	 * ever changes, one would have to test for
++	 *
++	 *	(r->state & (transmitting | transmitted))
++	 *
++	 * on unsequenced packets to determine if they could have been
++	 * transmitted. There are no synchronization guarantees as in the
++	 * sequenced case, since, in this case, the callback function will not
++	 * run on the same thread. Thus an exact determination is impossible.
++	 */
++	if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
++		rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
++			rqid);
++
++		/*
++		 * NB: Timeout has already been canceled, request already been
++		 * removed from pending and marked as locked and completed. As
++		 * we receive a "false" response, the packet might still be
++		 * queued though.
++		 */
++		ssh_rtl_queue_remove(r);
++
++		ssh_rtl_complete_with_status(r, -EREMOTEIO);
++		ssh_request_put(r);
++
++		ssh_rtl_tx_schedule(rtl);
++		return;
++	}
++
++	/*
++	 * NB: Timeout has already been canceled, request already been
++	 * removed from pending and marked as locked and completed. The request
++	 * can also not be queued any more, as it has been marked as
++	 * transmitting and later transmitted. Thus no need to remove it from
++	 * anywhere.
++	 */
++
++	ssh_rtl_complete_with_rsp(r, command, command_data);
++	ssh_request_put(r);
++
++	ssh_rtl_tx_schedule(rtl);
++}
++
++static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
++{
++	struct ssh_rtl *rtl;
++	unsigned long flags, fixed;
++	bool remove;
++
++	/*
++	 * Handle unsubmitted request: Try to mark the packet as locked,
++	 * expecting the state to be zero (i.e. unsubmitted). Note that, if
++	 * setting the state worked, we might still be adding the packet to the
++	 * queue in a currently executing submit call. In that case, however,
++	 * ptl reference must have been set previously, as locked is checked
++	 * after setting ptl. Furthermore, when the ptl reference is set, the
++	 * submission process is guaranteed to have entered the critical
++	 * section. Thus only if we successfully locked this request and ptl is
++	 * NULL, we have successfully removed the request, i.e. we are
++	 * guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
++	 * packet will never be added. Otherwise, we need to try and grab it
++	 * from the queue, where we are now guaranteed that the packet is or
++	 * has been queued, due to the critical section.
++	 *
++	 * Note that if the cmpxchg() fails, we are guaranteed that ptl has
++	 * been set and is non-NULL, as states can only be nonzero after this
++	 * has been set. Also note that we need to fetch the static (type)
++	 * flags to ensure that they don't cause the cmpxchg() to fail.
++	 */
++	fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
++	flags = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
++
++	/*
++	 * Force correct ordering with regards to state and ptl reference access
++	 * Force correct ordering with regard to state and ptl reference access
++	 * to safeguard cancellation against concurrent submission and the
++	 * lost-update problem. First try to exchange state, then also check
++	 * one in ssh_rtl_submit().
++	 */
++	smp_mb__after_atomic();
++
++	if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
++		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			return true;
++
++		ssh_rtl_complete_with_status(r, -ECANCELED);
++		return true;
++	}
++
++	rtl = ssh_request_rtl(r);
++	spin_lock(&rtl->queue.lock);
++
++	/*
++	 * Note: 1) Requests cannot be re-submitted. 2) If a request is
++	 * queued, it cannot be "transmitting"/"pending" yet. Thus, if we
++	 * successfully remove the request here, we have removed all its
++	 * occurrences in the system.
++	 */
++
++	remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
++	if (!remove) {
++		spin_unlock(&rtl->queue.lock);
++		return false;
++	}
++
++	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++	list_del(&r->node);
++
++	spin_unlock(&rtl->queue.lock);
++
++	ssh_request_put(r);	/* Drop reference obtained from queue. */
++
++	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++		return true;
++
++	ssh_rtl_complete_with_status(r, -ECANCELED);
++	return true;
++}
++
++static bool ssh_rtl_cancel_pending(struct ssh_request *r)
++{
++	/* If the packet is already locked, it's going to be removed shortly. */
++	if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
++		return true;
++
++	/*
++	 * Now that we have locked the packet, we have guaranteed that it can't
++	 * be added to the system any more. If ptl is NULL, the locked
++	 * check in ssh_rtl_submit() has not been run and any submission,
++	 * currently in progress or called later, won't add the packet. Thus we
++	 * can directly complete it.
++	 *
++	 * The implicit memory barrier of test_and_set_bit() should be enough
++	 * to ensure that the correct order (first lock, then check ptl) is
++	 * ensured. This is paired with the barrier in ssh_rtl_submit().
++	 */
++	if (!READ_ONCE(r->packet.ptl)) {
++		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			return true;
++
++		ssh_rtl_complete_with_status(r, -ECANCELED);
++		return true;
++	}
++
++	/*
++	 * Try to cancel the packet. If the packet has not been completed yet,
++	 * this will subsequently (and synchronously) call the completion
++	 * callback of the packet, which will complete the request.
++	 */
++	ssh_ptl_cancel(&r->packet);
++
++	/*
++	 * If the packet has been completed with success, i.e. has not been
++	 * canceled by the above call, the request may not have been completed
++	 * yet (may be waiting for a response). Check if we need to do this
++	 * here.
++	 */
++	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++		return true;
++
++	ssh_rtl_queue_remove(r);
++	ssh_rtl_pending_remove(r);
++	ssh_rtl_complete_with_status(r, -ECANCELED);
++
++	return true;
++}
++
++/**
++ * ssh_rtl_cancel() - Cancel request.
++ * @rqst:    The request to cancel.
++ * @pending: Whether to also cancel pending requests.
++ *
++ * Cancels the given request. If @pending is %false, this will not cancel
++ * pending requests, i.e. requests that have already been submitted to the
++ * packet layer but not been completed yet. If @pending is %true, this will
++ * cancel the given request regardless of the state it is in.
++ *
++ * If the request has been canceled by calling this function, both completion
++ * and release callbacks of the request will be executed in a reasonable
++ * time-frame. This may happen during execution of this function, however,
++ * there is no guarantee for this. For example, a request currently
++ * transmitting will be canceled/completed only after transmission has
++ * completed, and the respective callbacks will be executed on the transmitter
++ * thread, which may happen during, but also some time after execution of the
++ * cancel function.
++ *
++ * Return: Returns %true if the given request has been canceled or completed,
++ * either by this function or prior to calling this function, %false
++ * otherwise. If @pending is %true, this function will always return %true.
++ */
++bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
++{
++	struct ssh_rtl *rtl;
++	bool canceled;
++
++	if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
++		return true;
++
++	if (pending)
++		canceled = ssh_rtl_cancel_pending(rqst);
++	else
++		canceled = ssh_rtl_cancel_nonpending(rqst);
++
++	/* Note: rtl may be NULL if request has not been submitted yet. */
++	rtl = ssh_request_rtl(rqst);
++	if (canceled && rtl)
++		ssh_rtl_tx_schedule(rtl);
++
++	return canceled;
++}
++
++static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
++{
++	struct ssh_request *r = to_ssh_request(p);
++
++	if (unlikely(status)) {
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++
++		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			return;
++
++		/*
++		 * The packet may get canceled even though it has not been
++		 * submitted yet. The request may still be queued. Check the
++		 * queue and remove it if necessary. As the timeout would have
++		 * been started in this function on success, there's no need
++		 * to cancel it here.
++		 */
++		ssh_rtl_queue_remove(r);
++		ssh_rtl_pending_remove(r);
++		ssh_rtl_complete_with_status(r, status);
++
++		ssh_rtl_tx_schedule(ssh_request_rtl(r));
++		return;
++	}
++
++	/* Update state: Mark as transmitted and clear transmitting. */
++	set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
++	/* Ensure state never gets zero. */
++	smp_mb__before_atomic();
++	clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
++
++	/* If we expect a response, we just need to start the timeout. */
++	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
++		/*
++		 * Note: This is the only place where the timestamp gets set,
++		 * all other access to it is read-only.
++		 */
++		ssh_rtl_timeout_start(r);
++		return;
++	}
++
++	/*
++	 * If we don't expect a response, lock, remove, and complete the
++	 * request. Note that, at this point, the request is guaranteed to have
++	 * left the queue and no timeout has been started. Thus we only need to
++	 * remove it from pending. If the request has already been completed (it
++	 * may have been canceled) return.
++	 */
++
++	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++		return;
++
++	ssh_rtl_pending_remove(r);
++	ssh_rtl_complete_with_status(r, 0);
++
++	ssh_rtl_tx_schedule(ssh_request_rtl(r));
++}
++
++static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
++{
++	ktime_t timestamp = READ_ONCE(r->timestamp);
++
++	if (timestamp != KTIME_MAX)
++		return ktime_add(timestamp, timeout);
++	else
++		return KTIME_MAX;
++}
++
++static void ssh_rtl_timeout_reap(struct work_struct *work)
++{
++	struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
++	struct ssh_request *r, *n;
++	LIST_HEAD(claimed);
++	ktime_t now = ktime_get_coarse_boottime();
++	ktime_t timeout = rtl->rtx_timeout.timeout;
++	ktime_t next = KTIME_MAX;
++
++	/*
++	 * Mark reaper as "not pending". This is done before checking any
++	 * requests to avoid lost-update type problems.
++	 */
++	spin_lock(&rtl->rtx_timeout.lock);
++	rtl->rtx_timeout.expires = KTIME_MAX;
++	spin_unlock(&rtl->rtx_timeout.lock);
++
++	spin_lock(&rtl->pending.lock);
++	list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
++		ktime_t expires = ssh_request_get_expiration(r, timeout);
++
++		/*
++		 * Check if the timeout hasn't expired yet. Find out next
++		 * expiration date to be handled after this run.
++		 */
++		if (ktime_after(expires, now)) {
++			next = ktime_before(expires, next) ? expires : next;
++			continue;
++		}
++
++		/* Avoid further transitions if locked. */
++		if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
++			continue;
++
++		/*
++		 * We have now marked the packet as locked. Thus it cannot be
++		 * added to the pending or queued lists again after we've
++		 * removed it here. We can therefore re-use the node of this
++		 * packet temporarily.
++		 */
++
++		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
++
++		atomic_dec(&rtl->pending.count);
++		list_del(&r->node);
++
++		list_add_tail(&r->node, &claimed);
++	}
++	spin_unlock(&rtl->pending.lock);
++
++	/* Cancel and complete the request. */
++	list_for_each_entry_safe(r, n, &claimed, node) {
++		/*
++		 * At this point we've removed the packet from pending. This
++		 * means that we've obtained the last (only) reference of the
++		 * system to it. Thus we can just complete it.
++		 */
++		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			ssh_rtl_complete_with_status(r, -ETIMEDOUT);
++
++		/*
++		 * Drop the reference we've obtained by removing it from the
++		 * pending set.
++		 */
++		list_del(&r->node);
++		ssh_request_put(r);
++	}
++
++	/* Ensure that the reaper doesn't run again immediately. */
++	next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
++	if (next != KTIME_MAX)
++		ssh_rtl_timeout_reaper_mod(rtl, now, next);
++
++	ssh_rtl_tx_schedule(rtl);
++}
++
++static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
++			     const struct ssam_span *data)
++{
++	rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
++		get_unaligned_le16(&cmd->rqid));
++
++	rtl->ops.handle_event(rtl, cmd, data);
++}
++
++static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
++{
++	struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
++	struct device *dev = &p->serdev->dev;
++	struct ssh_command *command;
++	struct ssam_span command_data;
++
++	if (sshp_parse_command(dev, data, &command, &command_data))
++		return;
++
++	if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
++		ssh_rtl_rx_event(rtl, command, &command_data);
++	else
++		ssh_rtl_complete(rtl, command, &command_data);
++}
++
++static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
++{
++	if (!data->len) {
++		ptl_err(p, "rtl: rx: no data frame payload\n");
++		return;
++	}
++
++	switch (data->ptr[0]) {
++	case SSH_PLD_TYPE_CMD:
++		ssh_rtl_rx_command(p, data);
++		break;
++
++	default:
++		ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
++			data->ptr[0]);
++		break;
++	}
++}
++
++static void ssh_rtl_packet_release(struct ssh_packet *p)
++{
++	struct ssh_request *rqst;
++
++	rqst = to_ssh_request(p);
++	rqst->ops->release(rqst);
++}
++
++static const struct ssh_packet_ops ssh_rtl_packet_ops = {
++	.complete = ssh_rtl_packet_callback,
++	.release = ssh_rtl_packet_release,
++};
++
++/**
++ * ssh_request_init() - Initialize SSH request.
++ * @rqst:  The request to initialize.
++ * @flags: Request flags, determining the type of the request.
++ * @ops:   Request operations.
++ *
++ * Initializes the given SSH request and underlying packet. Sets the message
++ * buffer pointer to %NULL and the message buffer length to zero. This buffer
++ * has to be set separately via ssh_request_set_data() before submission and
++ * must contain a valid SSH request message.
++ *
++ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
++ */
++int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
++		     const struct ssh_request_ops *ops)
++{
++	unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
++
++	/* Unsequenced requests cannot have a response. */
++	if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
++		return -EINVAL;
++
++	if (!(flags & SSAM_REQUEST_UNSEQUENCED))
++		type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
++
++	ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
++			&ssh_rtl_packet_ops);
++
++	INIT_LIST_HEAD(&rqst->node);
++
++	rqst->state = 0;
++	if (flags & SSAM_REQUEST_HAS_RESPONSE)
++		rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
++
++	rqst->timestamp = KTIME_MAX;
++	rqst->ops = ops;
++
++	return 0;
++}
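++
++/*
++ * Illustrative usage sketch: initializing and submitting a request. The ops
++ * implementation (my_request_ops), the request container (my_rqst), and the
++ * pre-serialized message buffer (msg, msg_len) are hypothetical placeholders;
++ * ssh_rtl_flush() below shows a complete in-file example without payload.
++ *
++ *	static const struct ssh_request_ops my_request_ops = {
++ *		.complete = my_complete,	// (rqst, cmd, data, status)
++ *		.release  = my_release,		// drop container resources
++ *	};
++ *
++ *	ssh_request_init(&my_rqst->base, SSAM_REQUEST_HAS_RESPONSE,
++ *			 &my_request_ops);
++ *	ssh_request_set_data(&my_rqst->base, msg, msg_len);
++ *
++ *	status = ssh_rtl_submit(rtl, &my_rqst->base);
++ */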
++
++/**
++ * ssh_rtl_init() - Initialize request transport layer.
++ * @rtl:    The request transport layer to initialize.
++ * @serdev: The underlying serial device, i.e. the lower-level transport.
++ * @ops:    Request transport layer operations.
++ *
++ * Initializes the given request transport layer and associated packet
++ * transport layer. Transmitter and receiver threads must be started
++ * separately via ssh_rtl_tx_start() and ssh_rtl_rx_start(), after the
++ * request-layer has been initialized and the lower-level serial device layer
++ * has been set up.
++ *
++ * Return: Returns zero on success and a nonzero error code on failure.
++ */
++int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
++		 const struct ssh_rtl_ops *ops)
++{
++	struct ssh_ptl_ops ptl_ops;
++	int status;
++
++	ptl_ops.data_received = ssh_rtl_rx_data;
++
++	status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
++	if (status)
++		return status;
++
++	spin_lock_init(&rtl->queue.lock);
++	INIT_LIST_HEAD(&rtl->queue.head);
++
++	spin_lock_init(&rtl->pending.lock);
++	INIT_LIST_HEAD(&rtl->pending.head);
++	atomic_set_release(&rtl->pending.count, 0);
++
++	INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
++
++	spin_lock_init(&rtl->rtx_timeout.lock);
++	rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
++	rtl->rtx_timeout.expires = KTIME_MAX;
++	INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
++
++	rtl->ops = *ops;
++
++	return 0;
++}
++
++/**
++ * ssh_rtl_destroy() - Deinitialize request transport layer.
++ * @rtl: The request transport layer to deinitialize.
++ *
++ * Deinitializes the given request transport layer and frees resources
++ * associated with it. If receiver and/or transmitter threads have been
++ * started, the layer must first be shut down via ssh_rtl_shutdown() before
++ * this function can be called.
++ */
++void ssh_rtl_destroy(struct ssh_rtl *rtl)
++{
++	ssh_ptl_destroy(&rtl->ptl);
++}
++
++/**
++ * ssh_rtl_start() - Start request transmitter and receiver.
++ * @rtl: The request transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_rtl_start(struct ssh_rtl *rtl)
++{
++	int status;
++
++	status = ssh_ptl_tx_start(&rtl->ptl);
++	if (status)
++		return status;
++
++	ssh_rtl_tx_schedule(rtl);
++
++	status = ssh_ptl_rx_start(&rtl->ptl);
++	if (status) {
++		ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
++		ssh_ptl_tx_stop(&rtl->ptl);
++		return status;
++	}
++
++	return 0;
++}
++
++struct ssh_flush_request {
++	struct ssh_request base;
++	struct completion completion;
++	int status;
++};
++
++static void ssh_rtl_flush_request_complete(struct ssh_request *r,
++					   const struct ssh_command *cmd,
++					   const struct ssam_span *data,
++					   int status)
++{
++	struct ssh_flush_request *rqst;
++
++	rqst = container_of(r, struct ssh_flush_request, base);
++	rqst->status = status;
++}
++
++static void ssh_rtl_flush_request_release(struct ssh_request *r)
++{
++	struct ssh_flush_request *rqst;
++
++	rqst = container_of(r, struct ssh_flush_request, base);
++	complete_all(&rqst->completion);
++}
++
++static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
++	.complete = ssh_rtl_flush_request_complete,
++	.release = ssh_rtl_flush_request_release,
++};
++
++/**
++ * ssh_rtl_flush() - Flush the request transport layer.
++ * @rtl:     The request transport layer.
++ * @timeout: Timeout for the flush operation in jiffies.
++ *
++ * Queue a special flush request and wait for its completion. This request
++ * will be completed after all other currently queued and pending requests
++ * have been completed. Instead of a normal data packet, this request submits
++ * a special flush packet, meaning that upon completion, also the underlying
++ * packet transport layer has been flushed.
++ *
++ * Flushing the request layer guarantees that all previously submitted
++ * requests have been fully completed before this call returns. Additionally,
++ * flushing blocks execution of all later submitted requests until the flush
++ * has been completed.
++ *
++ * If the caller ensures that no new requests are submitted after a call to
++ * this function, the request transport layer is guaranteed to have no
++ * remaining requests when this call returns. The same guarantee does not hold
++ * for the packet layer, on which control packets may still be queued after
++ * this call.
++ *
++ * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
++ * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
++ * and/or request transport layer has been shut down before this call. May
++ * also return %-EINTR if the underlying packet transmission has been
++ * interrupted.
++ */
++int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
++{
++	const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
++	struct ssh_flush_request rqst;
++	int status;
++
++	ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
++	rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
++	rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
++	rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
++
++	init_completion(&rqst.completion);
++
++	status = ssh_rtl_submit(rtl, &rqst.base);
++	if (status)
++		return status;
++
++	ssh_request_put(&rqst.base);
++
++	if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
++		ssh_rtl_cancel(&rqst.base, true);
++		wait_for_completion(&rqst.completion);
++	}
++
++	WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
++		rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
++
++	return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
++}
++
++/**
++ * ssh_rtl_shutdown() - Shut down request transport layer.
++ * @rtl: The request transport layer.
++ *
++ * Shuts down the request transport layer, removing and canceling all queued
++ * and pending requests. Requests canceled by this operation will be completed
++ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
++ * stopped, and the lower-level packet layer will be shut down.
++ *
++ * As a result of this function, the transport layer will be marked as shut
++ * down. Submission of requests after the transport layer has been shut down
++ * will fail with %-ESHUTDOWN.
++ */
++void ssh_rtl_shutdown(struct ssh_rtl *rtl)
++{
++	struct ssh_request *r, *n;
++	LIST_HEAD(claimed);
++	int pending;
++
++	set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
++	/*
++	 * Ensure that the layer gets marked as shut-down before actually
++	 * stopping it. In combination with the check in ssh_rtl_submit(),
++	 * this guarantees that no new requests can be added and all already
++	 * queued requests are properly canceled.
++	 */
++	smp_mb__after_atomic();
++
++	/* Remove requests from queue. */
++	spin_lock(&rtl->queue.lock);
++	list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++		/* Ensure state never gets zero. */
++		smp_mb__before_atomic();
++		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
++
++		list_del(&r->node);
++		list_add_tail(&r->node, &claimed);
++	}
++	spin_unlock(&rtl->queue.lock);
++
++	/*
++	 * We have now guaranteed that the queue is empty and no more new
++	 * requests can be submitted (i.e. it will stay empty). This means that
++	 * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
++	 * we can simply call cancel_work_sync() on tx.work here and when that
++	 * returns, we've locked it down. This also means that after this call,
++	 * we don't submit any more packets to the underlying packet layer, so
++	 * we can also shut that down.
++	 */
++
++	cancel_work_sync(&rtl->tx.work);
++	ssh_ptl_shutdown(&rtl->ptl);
++	cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
++
++	/*
++	 * Shutting down the packet layer should also have canceled all
++	 * requests. Thus the pending set should be empty. Attempt to handle
++	 * this gracefully anyway, even though this should be dead code.
++	 */
++
++	pending = atomic_read(&rtl->pending.count);
++	if (WARN_ON(pending)) {
++		spin_lock(&rtl->pending.lock);
++		list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
++			set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++			/* Ensure state never gets zero. */
++			smp_mb__before_atomic();
++			clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
++
++			list_del(&r->node);
++			list_add_tail(&r->node, &claimed);
++		}
++		spin_unlock(&rtl->pending.lock);
++	}
++
++	/* Finally, cancel and complete the requests we claimed before. */
++	list_for_each_entry_safe(r, n, &claimed, node) {
++		/*
++		 * We need test_and_set() because we still might compete with
++		 * cancellation.
++		 */
++		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			ssh_rtl_complete_with_status(r, -ESHUTDOWN);
++
++		/*
++		 * Drop the reference we've obtained by removing it from the
++		 * lists.
++		 */
++		list_del(&r->node);
++		ssh_request_put(r);
++	}
++}
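++
++/*
++ * Illustrative lifecycle sketch: bring-up and tear-down order of this layer
++ * as driven by a higher-level controller. Error handling is omitted; serdev
++ * and rtl_ops are provided by the caller.
++ *
++ *	ssh_rtl_init(rtl, serdev, &rtl_ops);	// sets up rtl and embedded ptl
++ *	ssh_rtl_start(rtl);			// start transmitter and receiver
++ *
++ *	// ... submit requests via ssh_rtl_submit() ...
++ *
++ *	ssh_rtl_flush(rtl, msecs_to_jiffies(5000));	// drain submitted work
++ *	ssh_rtl_shutdown(rtl);			// cancel remaining requests
++ *	ssh_rtl_destroy(rtl);			// release packet layer resources
++ */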
+diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.h b/drivers/platform/surface/aggregator/ssh_request_layer.h
+new file mode 100644
+index 000000000000..cb35815858d1
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/ssh_request_layer.h
+@@ -0,0 +1,143 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * SSH request transport layer.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
++#define _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
++
++#include <linux/atomic.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++#include <linux/surface_aggregator/controller.h>
++
++#include "ssh_packet_layer.h"
++
++/**
++ * enum ssh_rtl_state_flags - State-flags for &struct ssh_rtl.
++ *
++ * @SSH_RTL_SF_SHUTDOWN_BIT:
++ *	Indicates that the request transport layer has been shut down or is
++ *	being shut down and should not accept any new requests.
++ */
++enum ssh_rtl_state_flags {
++	SSH_RTL_SF_SHUTDOWN_BIT,
++};
++
++/**
++ * struct ssh_rtl_ops - Callback operations for request transport layer.
++ * @handle_event: Function called when a SSH event has been received. The
++ *                specified function takes the request layer, received command
++ *                struct, and corresponding payload as arguments. If the event
++ *                has no payload, the payload span is empty (not %NULL).
++ */
++struct ssh_rtl_ops {
++	void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd,
++			     const struct ssam_span *data);
++};
++
++/**
++ * struct ssh_rtl - SSH request transport layer.
++ * @ptl:           Underlying packet transport layer.
++ * @state:         State(-flags) of the transport layer.
++ * @queue:         Request submission queue.
++ * @queue.lock:    Lock for modifying the request submission queue.
++ * @queue.head:    List-head of the request submission queue.
++ * @pending:       Set/list of pending requests.
++ * @pending.lock:  Lock for modifying the request set.
++ * @pending.head:  List-head of the pending set/list.
++ * @pending.count: Number of currently pending requests.
++ * @tx:            Transmitter subsystem.
++ * @tx.work:       Transmitter work item.
++ * @rtx_timeout:   Retransmission timeout subsystem.
++ * @rtx_timeout.lock:    Lock for modifying the retransmission timeout reaper.
++ * @rtx_timeout.timeout: Timeout interval for retransmission.
++ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
++ * @rtx_timeout.reaper:  Work performing timeout checks and subsequent actions.
++ * @ops:           Request layer operations.
++ */
++struct ssh_rtl {
++	struct ssh_ptl ptl;
++	unsigned long state;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++	} queue;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++		atomic_t count;
++	} pending;
++
++	struct {
++		struct work_struct work;
++	} tx;
++
++	struct {
++		spinlock_t lock;
++		ktime_t timeout;
++		ktime_t expires;
++		struct delayed_work reaper;
++	} rtx_timeout;
++
++	struct ssh_rtl_ops ops;
++};
++
++#define rtl_dbg(r, fmt, ...)  ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__)
++#define rtl_info(r, fmt, ...) ptl_info(&(r)->ptl, fmt, ##__VA_ARGS__)
++#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__)
++#define rtl_err(r, fmt, ...)  ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__)
++#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__)
++
++#define to_ssh_rtl(ptr, member) \
++	container_of(ptr, struct ssh_rtl, member)
++
++/**
++ * ssh_rtl_get_device() - Get device associated with request transport layer.
++ * @rtl: The request transport layer.
++ *
++ * Return: Returns the device that the given request transport layer builds
++ * upon.
++ */
++static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl)
++{
++	return ssh_ptl_get_device(&rtl->ptl);
++}
++
++/**
++ * ssh_request_rtl() - Get request transport layer associated with request.
++ * @rqst: The request to get the request transport layer reference for.
++ *
++ * Return: Returns the &struct ssh_rtl associated with the given SSH request.
++ */
++static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst)
++{
++	struct ssh_ptl *ptl;
++
++	ptl = READ_ONCE(rqst->packet.ptl);
++	return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL;
++}
++
++int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst);
++bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending);
++
++int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
++		 const struct ssh_rtl_ops *ops);
++
++int ssh_rtl_start(struct ssh_rtl *rtl);
++int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout);
++void ssh_rtl_shutdown(struct ssh_rtl *rtl);
++void ssh_rtl_destroy(struct ssh_rtl *rtl);
++
++int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
++		     const struct ssh_request_ops *ops);
++
++#endif /* _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H */
+diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
+new file mode 100644
+index 000000000000..f4b1ba887384
+--- /dev/null
++++ b/include/linux/surface_aggregator/controller.h
+@@ -0,0 +1,824 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Surface System Aggregator Module (SSAM) controller interface.
++ *
++ * Main communication interface for the SSAM EC. Provides a controller
++ * managing access and communication to and from the SSAM EC, as well as main
++ * communication structures and definitions.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
++#define _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
++
++#include <linux/completion.h>
++#include <linux/device.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++
++
++/* -- Main data types and definitions --------------------------------------- */
++
++/**
++ * enum ssam_event_flags - Flags for enabling/disabling SAM events
++ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
++ */
++enum ssam_event_flags {
++	SSAM_EVENT_SEQUENCED = BIT(0),
++};
++
++/**
++ * struct ssam_event - SAM event sent from the EC to the host.
++ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
++ * @target_id:       Target ID of the event source.
++ * @command_id:      Command ID of the event.
++ * @instance_id:     Instance ID of the event source.
++ * @length:          Length of the event payload in bytes.
++ * @data:            Event payload data.
++ */
++struct ssam_event {
++	u8 target_category;
++	u8 target_id;
++	u8 command_id;
++	u8 instance_id;
++	u16 length;
++	u8 data[];
++};
++
++/**
++ * enum ssam_request_flags - Flags for SAM requests.
++ *
++ * @SSAM_REQUEST_HAS_RESPONSE:
++ *	Specifies that the request expects a response. If not set, the request
++ *	will be directly completed after its underlying packet has been
++ *	transmitted. If set, the request transport system waits for a response
++ *	of the request.
++ *
++ * @SSAM_REQUEST_UNSEQUENCED:
++ *	Specifies that the request should be transmitted via an unsequenced
++ *	packet. If set, the request must not have a response, meaning that this
++ *	flag and the %SSAM_REQUEST_HAS_RESPONSE flag are mutually exclusive.
++ */
++enum ssam_request_flags {
++	SSAM_REQUEST_HAS_RESPONSE = BIT(0),
++	SSAM_REQUEST_UNSEQUENCED  = BIT(1),
++};
++
++/**
++ * struct ssam_request - SAM request description.
++ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
++ * @target_id:       ID of the request's target.
++ * @command_id:      Command ID of the request.
++ * @instance_id:     Instance ID of the request's target.
++ * @flags:           Flags for the request. See &enum ssam_request_flags.
++ * @length:          Length of the request payload in bytes.
++ * @payload:         Request payload data.
++ *
++ * This struct fully describes a SAM request with payload. It is intended to
++ * help set up the actual transport struct, e.g. &struct ssam_request_sync,
++ * and specifically its raw message data via ssam_request_write_data().
++ */
++struct ssam_request {
++	u8 target_category;
++	u8 target_id;
++	u8 command_id;
++	u8 instance_id;
++	u16 flags;
++	u16 length;
++	const u8 *payload;
++};
++
++/**
++ * struct ssam_response - Response buffer for SAM request.
++ * @capacity: Capacity of the buffer, in bytes.
++ * @length:   Length of the actual data stored in the memory pointed to by
++ *            @pointer, in bytes. Set by the transport system.
++ * @pointer:  Pointer to the buffer's memory, storing the response payload data.
++ */
++struct ssam_response {
++	size_t capacity;
++	size_t length;
++	u8 *pointer;
++};
++
++struct ssam_controller;
++
++struct ssam_controller *ssam_get_controller(void);
++struct ssam_controller *ssam_client_bind(struct device *client);
++int ssam_client_link(struct ssam_controller *ctrl, struct device *client);
++
++struct device *ssam_controller_device(struct ssam_controller *c);
++
++struct ssam_controller *ssam_controller_get(struct ssam_controller *c);
++void ssam_controller_put(struct ssam_controller *c);
++
++void ssam_controller_statelock(struct ssam_controller *c);
++void ssam_controller_stateunlock(struct ssam_controller *c);
++
++ssize_t ssam_request_write_data(struct ssam_span *buf,
++				struct ssam_controller *ctrl,
++				const struct ssam_request *spec);
++
++
++/* -- Synchronous request interface. ---------------------------------------- */
++
++/**
++ * struct ssam_request_sync - Synchronous SAM request struct.
++ * @base:   Underlying SSH request.
++ * @comp:   Completion used to signal full completion of the request. After the
++ *          request has been submitted, this struct may only be modified or
++ *          deallocated after the completion has been signaled.
++ * @resp:   Buffer to store the response.
++ * @status: Status of the request, set after the base request has been
++ *          completed or has failed.
++ */
++struct ssam_request_sync {
++	struct ssh_request base;
++	struct completion comp;
++	struct ssam_response *resp;
++	int status;
++};
++
++int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
++			    struct ssam_request_sync **rqst,
++			    struct ssam_span *buffer);
++
++void ssam_request_sync_free(struct ssam_request_sync *rqst);
++
++int ssam_request_sync_init(struct ssam_request_sync *rqst,
++			   enum ssam_request_flags flags);
++
++/**
++ * ssam_request_sync_set_data() - Set message data of a synchronous request.
++ * @rqst: The request.
++ * @ptr:  Pointer to the request message data.
++ * @len:  Length of the request message data.
++ *
++ * Set the request message data of a synchronous request. The provided buffer
++ * needs to live until the request has been completed.
++ */
++static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
++					      u8 *ptr, size_t len)
++{
++	ssh_request_set_data(&rqst->base, ptr, len);
++}
++
++/**
++ * ssam_request_sync_set_resp() - Set response buffer of a synchronous request.
++ * @rqst: The request.
++ * @resp: The response buffer.
++ *
++ * Sets the response buffer of a synchronous request. This buffer will store
++ * the response of the request after it has been completed. May be %NULL if no
++ * response is expected.
++ */
++static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
++					      struct ssam_response *resp)
++{
++	rqst->resp = resp;
++}
++
++int ssam_request_sync_submit(struct ssam_controller *ctrl,
++			     struct ssam_request_sync *rqst);
++
++/**
++ * ssam_request_sync_wait() - Wait for completion of a synchronous request.
++ * @rqst: The request to wait for.
++ *
++ * Wait for completion and release of a synchronous request. After this
++ * function terminates, the request is guaranteed to have left the transport
++ * system. After successful submission of a request, this function must be
++ * called before accessing the response of the request, freeing the request,
++ * or freeing any of the buffers associated with the request.
++ *
++ * This function must not be called if the request has not been submitted yet.
++ * Doing so may lead to a deadlock/infinite wait if the subsequent submission
++ * fails, as the completion will then never be triggered.
++ *
++ * Return: Returns the status of the given request, which is set on completion
++ * of the packet. This value is zero on success and negative on failure.
++ */
++static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
++{
++	wait_for_completion(&rqst->comp);
++	return rqst->status;
++}
++
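As an illustration of how these declarations fit together, the following sketch (an editorial example, not part of the patch) combines the allocation, setup, submission, and wait primitives into one synchronous round trip; the in-tree helpers declared below may be implemented differently.

static int example_submit_and_wait(struct ssam_controller *ctrl,
				   const struct ssam_request *spec,
				   struct ssam_response *rsp)
{
	struct ssam_request_sync *rqst;
	struct ssam_span buf;
	ssize_t len;
	int status;

	/* Allocate the request together with its message buffer. */
	status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
	if (status)
		return status;

	status = ssam_request_sync_init(rqst, spec->flags);
	if (status)
		goto out;

	ssam_request_sync_set_resp(rqst, rsp);

	/* Serialize the request specification into the message buffer. */
	len = ssam_request_write_data(&buf, ctrl, spec);
	if (len < 0) {
		status = len;
		goto out;
	}
	ssam_request_sync_set_data(rqst, buf.ptr, len);

	status = ssam_request_sync_submit(ctrl, rqst);
	if (!status)
		status = ssam_request_sync_wait(rqst);

out:
	ssam_request_sync_free(rqst);
	return status;
}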
++int ssam_request_sync(struct ssam_controller *ctrl,
++		      const struct ssam_request *spec,
++		      struct ssam_response *rsp);
++
++int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
++				  const struct ssam_request *spec,
++				  struct ssam_response *rsp,
++				  struct ssam_span *buf);
++
++/**
++ * ssam_request_sync_onstack() - Execute a synchronous request on the stack.
++ * @ctrl: The controller via which the request is submitted.
++ * @rqst: The request specification.
++ * @rsp:  The response buffer.
++ * @payload_len: The (maximum) request payload length.
++ *
++ * Allocates a synchronous request with specified payload length on the stack,
++ * fully initializes it via the provided request specification, submits it,
++ * and finally waits for its completion before returning its status. This
++ * helper macro essentially allocates the request message buffer on the stack
++ * and then calls ssam_request_sync_with_buffer().
++ *
++ * Note: The @payload_len parameter specifies the maximum payload length, used
++ * for buffer allocation. The actual payload length may be smaller.
++ *
++ * Return: Returns the status of the request or any failure during setup, i.e.
++ * zero on success and a negative value on failure.
++ */
++#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len)			\
++	({									\
++		u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)];		\
++		struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) };	\
++										\
++		ssam_request_sync_with_buffer(ctrl, rqst, rsp, &__buf);		\
++	})
++
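A minimal usage sketch for the macro above (not part of the patch): the target category comes from this header, while the command and instance IDs are placeholders rather than documented EC commands.

static int example_set_value(struct ssam_controller *ctrl, u8 value)
{
	struct ssam_request rqst = {
		.target_category = SSAM_SSH_TC_TMP,
		.target_id       = 0x01,
		.command_id      = 0x0b,	/* placeholder command ID */
		.instance_id     = 0x00,
		.flags           = 0,		/* no response expected */
		.length          = sizeof(value),
		.payload         = &value,
	};

	/* The message buffer for the one-byte payload lives on the stack. */
	return ssam_request_sync_onstack(ctrl, &rqst, NULL, sizeof(value));
}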
++/**
++ * __ssam_retry - Retry request in case of I/O errors or timeouts.
++ * @request: The request function to execute. Must return an integer.
++ * @n:       Number of tries.
++ * @args:    Arguments for the request function.
++ *
++ * Executes the given request function, i.e. calls @request. In case the
++ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
++ * or underlying packet timed out), @request will be re-executed again, up to
++ * @n times in total.
++ *
++ * Return: Returns the return value of the last execution of @request.
++ */
++#define __ssam_retry(request, n, args...)				\
++	({								\
++		int __i, __s = 0;					\
++									\
++		for (__i = (n); __i > 0; __i--) {			\
++			__s = request(args);				\
++			if (__s != -ETIMEDOUT && __s != -EREMOTEIO)	\
++				break;					\
++		}							\
++		__s;							\
++	})
++
++/**
++ * ssam_retry - Retry request in case of I/O errors or timeouts up to three
++ * times in total.
++ * @request: The request function to execute. Must return an integer.
++ * @args:    Arguments for the request function.
++ *
++ * Executes the given request function, i.e. calls @request. In case the
++ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
++ * or underlying packet timed out), @request will be re-executed again, up to
++ * three times in total.
++ *
++ * See __ssam_retry() for a more generic macro for this purpose.
++ *
++ * Return: Returns the return value of the last execution of @request.
++ */
++#define ssam_retry(request, args...) \
++	__ssam_retry(request, 3, args)
++
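In practice these macros simply wrap one of the synchronous request helpers; a sketch (assuming a request and response set up as in the example above):

static int example_request_with_retry(struct ssam_controller *ctrl,
				      const struct ssam_request *rqst,
				      struct ssam_response *rsp)
{
	/* Re-issues ssam_request_sync() on -EREMOTEIO or -ETIMEDOUT. */
	return ssam_retry(ssam_request_sync, ctrl, rqst, rsp);
}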
++/**
++ * struct ssam_request_spec - Blue-print specification of SAM request.
++ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
++ * @target_id:       ID of the request's target.
++ * @command_id:      Command ID of the request.
++ * @instance_id:     Instance ID of the request's target.
++ * @flags:           Flags for the request. See &enum ssam_request_flags.
++ *
++ * Blue-print specification for a SAM request. This struct describes the
++ * unique static parameters of a request (i.e. type) without specifying any of
++ * its instance-specific data (e.g. payload). It is intended to be used as base
++ * for defining simple request functions via the
++ * ``SSAM_DEFINE_SYNC_REQUEST_x()`` family of macros.
++ */
++struct ssam_request_spec {
++	u8 target_category;
++	u8 target_id;
++	u8 command_id;
++	u8 instance_id;
++	u8 flags;
++};
++
++/**
++ * struct ssam_request_spec_md - Blue-print specification for multi-device SAM
++ * request.
++ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
++ * @command_id:      Command ID of the request.
++ * @flags:           Flags for the request. See &enum ssam_request_flags.
++ *
++ * Blue-print specification for a multi-device SAM request, i.e. a request
++ * that is applicable to multiple device instances, described by their
++ * individual target and instance IDs. This struct describes the unique static
++ * parameters of a request (i.e. type) without specifying any of its
++ * instance-specific data (e.g. payload) and without specifying any of its
++ * device specific IDs (i.e. target and instance ID). It is intended to be
++ * used as base for defining simple multi-device request functions via the
++ * ``SSAM_DEFINE_SYNC_REQUEST_MD_x()`` and ``SSAM_DEFINE_SYNC_REQUEST_CL_x()``
++ * families of macros.
++ */
++struct ssam_request_spec_md {
++	u8 target_category;
++	u8 command_id;
++	u8 flags;
++};
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_N() - Define synchronous SAM request function
++ * with neither argument nor return value.
++ * @name: Name of the generated function.
++ * @spec: Specification (&struct ssam_request_spec) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request having neither argument nor return value. The
++ * generated function takes care of setting up the request struct and buffer
++ * allocation, as well as execution of the request itself, returning once the
++ * request has been fully completed. The required transport buffer will be
++ * allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl)``, returning the status of the request, which is zero on success and
++ * negative on failure. The ``ctrl`` parameter is the controller via which the
++ * request is being sent.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...)				\
++	int name(struct ssam_controller *ctrl)					\
++	{									\
++		struct ssam_request_spec s = (struct ssam_request_spec)spec;	\
++		struct ssam_request rqst;					\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = s.target_id;					\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = s.instance_id;				\
++		rqst.flags = s.flags;						\
++		rqst.length = 0;						\
++		rqst.payload = NULL;						\
++										\
++		return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0);		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_W() - Define synchronous SAM request function with
++ * argument.
++ * @name:  Name of the generated function.
++ * @atype: Type of the request's argument.
++ * @spec:  Specification (&struct ssam_request_spec) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking an argument of type @atype and having no
++ * return value. The generated function takes care of setting up the request
++ * struct, buffer allocation, as well as execution of the request itself,
++ * returning once the request has been fully completed. The required transport
++ * buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, const atype *arg)``, returning the status of the request, which is
++ * zero on success and negative on failure. The ``ctrl`` parameter is the
++ * controller via which the request is sent. The request argument is specified
++ * via the ``arg`` pointer.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...)			\
++	int name(struct ssam_controller *ctrl, const atype *arg)		\
++	{									\
++		struct ssam_request_spec s = (struct ssam_request_spec)spec;	\
++		struct ssam_request rqst;					\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = s.target_id;					\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = s.instance_id;				\
++		rqst.flags = s.flags;						\
++		rqst.length = sizeof(atype);					\
++		rqst.payload = (u8 *)arg;					\
++										\
++		return ssam_request_sync_onstack(ctrl, &rqst, NULL,		\
++						 sizeof(atype));		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_R() - Define synchronous SAM request function with
++ * return value.
++ * @name:  Name of the generated function.
++ * @rtype: Type of the request's return value.
++ * @spec:  Specification (&struct ssam_request_spec) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking no argument but having a return value of
++ * type @rtype. The generated function takes care of setting up the request
++ * and response structs, buffer allocation, as well as execution of the
++ * request itself, returning once the request has been fully completed. The
++ * required transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, rtype *ret)``, returning the status of the request, which is zero on
++ * success and negative on failure. The ``ctrl`` parameter is the controller
++ * via which the request is sent. The request's return value is written to the
++ * memory pointed to by the ``ret`` parameter.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...)			\
++	int name(struct ssam_controller *ctrl, rtype *ret)			\
++	{									\
++		struct ssam_request_spec s = (struct ssam_request_spec)spec;	\
++		struct ssam_request rqst;					\
++		struct ssam_response rsp;					\
++		int status;							\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = s.target_id;					\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = s.instance_id;				\
++		rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE;		\
++		rqst.length = 0;						\
++		rqst.payload = NULL;						\
++										\
++		rsp.capacity = sizeof(rtype);					\
++		rsp.length = 0;							\
++		rsp.pointer = (u8 *)ret;					\
++										\
++		status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0);	\
++		if (status)							\
++			return status;						\
++										\
++		if (rsp.length != sizeof(rtype)) {				\
++			struct device *dev = ssam_controller_device(ctrl);	\
++			dev_err(dev,						\
++				"rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
++				sizeof(rtype), rsp.length, rqst.target_category,\
++				rqst.command_id);				\
++			return -EIO;						\
++		}								\
++										\
++		return 0;							\
++	}
++
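A usage sketch for this macro family (placeholder IDs, not a documented EC command):

SSAM_DEFINE_SYNC_REQUEST_R(example_get_mode, u8, {
	.target_category = SSAM_SSH_TC_SAM,
	.target_id       = 0x01,
	.command_id      = 0x2d,	/* placeholder */
	.instance_id     = 0x00,
});

/* Generated: int example_get_mode(struct ssam_controller *ctrl, u8 *ret); */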
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM
++ * request function with neither argument nor return value.
++ * @name: Name of the generated function.
++ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request having neither argument nor return value. Device
++ * specifying parameters are not hard-coded, but instead must be provided to
++ * the function. The generated function takes care of setting up the request
++ * struct, buffer allocation, as well as execution of the request itself,
++ * returning once the request has been fully completed. The required transport
++ * buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, u8 tid, u8 iid)``, returning the status of the request, which is
++ * zero on success and negative on failure. The ``ctrl`` parameter is the
++ * controller via which the request is sent, ``tid`` the target ID for the
++ * request, and ``iid`` the instance ID.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...)				\
++	int name(struct ssam_controller *ctrl, u8 tid, u8 iid)			\
++	{									\
++		struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
++		struct ssam_request rqst;					\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = tid;						\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = iid;						\
++		rqst.flags = s.flags;						\
++		rqst.length = 0;						\
++		rqst.payload = NULL;						\
++										\
++		return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0);		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_MD_W() - Define synchronous multi-device SAM
++ * request function with argument.
++ * @name:  Name of the generated function.
++ * @atype: Type of the request's argument.
++ * @spec:  Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking an argument of type @atype and having no
++ * return value. Device specifying parameters are not hard-coded, but instead
++ * must be provided to the function. The generated function takes care of
++ * setting up the request struct, buffer allocation, as well as execution of
++ * the request itself, returning once the request has been fully completed.
++ * The required transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the status of the
++ * request, which is zero on success and negative on failure. The ``ctrl``
++ * parameter is the controller via which the request is sent, ``tid`` the
++ * target ID for the request, and ``iid`` the instance ID. The request argument
++ * is specified via the ``arg`` pointer.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...)			\
++	int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)\
++	{									\
++		struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
++		struct ssam_request rqst;					\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = tid;						\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = iid;						\
++		rqst.flags = s.flags;						\
++		rqst.length = sizeof(atype);					\
++		rqst.payload = (u8 *)arg;					\
++										\
++		return ssam_request_sync_onstack(ctrl, &rqst, NULL,		\
++						 sizeof(atype));		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_MD_R() - Define synchronous multi-device SAM
++ * request function with return value.
++ * @name:  Name of the generated function.
++ * @rtype: Type of the request's return value.
++ * @spec:  Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking no argument but having a return value of
++ * type @rtype. Device specifying parameters are not hard-coded, but instead
++ * must be provided to the function. The generated function takes care of
++ * setting up the request and response structs, buffer allocation, as well as
++ * execution of the request itself, returning once the request has been fully
++ * completed. The required transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status of the request,
++ * which is zero on success and negative on failure. The ``ctrl`` parameter is
++ * the controller via which the request is sent, ``tid`` the target ID for the
++ * request, and ``iid`` the instance ID. The request's return value is written
++ * to the memory pointed to by the ``ret`` parameter.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...)			\
++	int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret)	\
++	{									\
++		struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
++		struct ssam_request rqst;					\
++		struct ssam_response rsp;					\
++		int status;							\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = tid;						\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = iid;						\
++		rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE;		\
++		rqst.length = 0;						\
++		rqst.payload = NULL;						\
++										\
++		rsp.capacity = sizeof(rtype);					\
++		rsp.length = 0;							\
++		rsp.pointer = (u8 *)ret;					\
++										\
++		status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0);	\
++		if (status)							\
++			return status;						\
++										\
++		if (rsp.length != sizeof(rtype)) {				\
++			struct device *dev = ssam_controller_device(ctrl);	\
++			dev_err(dev,						\
++				"rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
++				sizeof(rtype), rsp.length, rqst.target_category,\
++				rqst.command_id);				\
++			return -EIO;						\
++		}								\
++										\
++		return 0;							\
++	}
++
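A corresponding sketch for the multi-device variants: target and instance IDs become runtime parameters of the generated function, and the command ID below is again a placeholder.

SSAM_DEFINE_SYNC_REQUEST_MD_W(example_set_limit, __le32, {
	.target_category = SSAM_SSH_TC_BAT,
	.command_id      = 0x3c,	/* placeholder */
});

/*
 * Generated: int example_set_limit(struct ssam_controller *ctrl, u8 tid,
 *                                  u8 iid, const __le32 *arg);
 */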
++
++/* -- Event notifier/callbacks. --------------------------------------------- */
++
++#define SSAM_NOTIF_STATE_SHIFT		2
++#define SSAM_NOTIF_STATE_MASK		((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
++
++/**
++ * enum ssam_notif_flags - Flags used in return values from SSAM notifier
++ * callback functions.
++ *
++ * @SSAM_NOTIF_HANDLED:
++ *	Indicates that the notification has been handled. This flag should be
++ *	set by the handler if the handler can act/has acted upon the event
++ *	provided to it. This flag should not be set if the handler is not a
++ *	primary handler intended for the provided event.
++ *
++ *	If this flag has not been set by any handler after the notifier chain
++ *	has been traversed, a warning will be emitted, stating that the event
++ *	has not been handled.
++ *
++ * @SSAM_NOTIF_STOP:
++ *	Indicates that the notifier traversal should stop. If this flag is
++ *	returned from a notifier callback, notifier chain traversal will
++ *	immediately stop and any remaining notifiers will not be called. This
++ *	flag is automatically set when ssam_notifier_from_errno() is called
++ *	with a negative error value.
++ */
++enum ssam_notif_flags {
++	SSAM_NOTIF_HANDLED = BIT(0),
++	SSAM_NOTIF_STOP    = BIT(1),
++};
++
++struct ssam_event_notifier;
++
++typedef u32 (*ssam_notifier_fn_t)(struct ssam_event_notifier *nf,
++				  const struct ssam_event *event);
++
++/**
++ * struct ssam_notifier_block - Base notifier block for SSAM event
++ * notifications.
++ * @node:     The node for the list of notifiers.
++ * @fn:       The callback function of this notifier. This function takes the
++ *            respective notifier block and event as input and should return
++ *            a notifier value, which can either be obtained from the flags
++ *            provided in &enum ssam_notif_flags, converted from a standard
++ *            error value via ssam_notifier_from_errno(), or a combination of
++ *            both (e.g. ``ssam_notifier_from_errno(e) | SSAM_NOTIF_HANDLED``).
++ * @priority: Priority value determining the order in which notifier callbacks
++ *            will be called. A higher value means higher priority, i.e. the
++ *            associated callback will be executed earlier than other (lower
++ *            priority) callbacks.
++ */
++struct ssam_notifier_block {
++	struct list_head node;
++	ssam_notifier_fn_t fn;
++	int priority;
++};
++
++/**
++ * ssam_notifier_from_errno() - Convert standard error value to notifier
++ * return code.
++ * @err: The error code to convert, must be negative (in case of failure) or
++ *       zero (in case of success).
++ *
++ * Return: Returns the notifier return value obtained by converting the
++ * specified @err value. In case @err is negative, the %SSAM_NOTIF_STOP flag
++ * will be set, causing notifier call chain traversal to abort.
++ */
++static inline u32 ssam_notifier_from_errno(int err)
++{
++	if (WARN_ON(err > 0) || err == 0)
++		return 0;
++	else
++		return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
++}
++
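A notifier callback might look roughly like the sketch below: it claims only events it understands, reports malformed payloads as an error (which also sets %SSAM_NOTIF_STOP via ssam_notifier_from_errno()), and leaves everything else to other handlers. The command ID check is purely illustrative.

static u32 example_notifier_fn(struct ssam_event_notifier *nf,
			       const struct ssam_event *event)
{
	/* Not the event this handler is responsible for: pass it on. */
	if (event->command_id != 0x2a)	/* placeholder event CID */
		return 0;

	if (event->length < 1)
		return ssam_notifier_from_errno(-EPROTO) | SSAM_NOTIF_HANDLED;

	pr_debug("example event: first payload byte %#04x\n", event->data[0]);
	return SSAM_NOTIF_HANDLED;
}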
++/**
++ * ssam_notifier_to_errno() - Convert notifier return code to standard error
++ * value.
++ * @ret: The notifier return value to convert.
++ *
++ * Return: Returns the negative error value encoded in @ret or zero if @ret
++ * indicates success.
++ */
++static inline int ssam_notifier_to_errno(u32 ret)
++{
++	return -(ret >> SSAM_NOTIF_STATE_SHIFT);
++}
++
++
++/* -- Event/notification registry. ------------------------------------------ */
++
++/**
++ * struct ssam_event_registry - Registry specification used for enabling events.
++ * @target_category: Target category for the event registry requests.
++ * @target_id:       Target ID for the event registry requests.
++ * @cid_enable:      Command ID for the event-enable request.
++ * @cid_disable:     Command ID for the event-disable request.
++ *
++ * This struct describes a SAM event registry via the minimal collection of
++ * SAM IDs specifying the requests to use for enabling and disabling an event.
++ * The individual event to be enabled/disabled itself is specified via &struct
++ * ssam_event_id.
++ */
++struct ssam_event_registry {
++	u8 target_category;
++	u8 target_id;
++	u8 cid_enable;
++	u8 cid_disable;
++};
++
++/**
++ * struct ssam_event_id - Unique event ID used for enabling events.
++ * @target_category: Target category of the event source.
++ * @instance:        Instance ID of the event source.
++ *
++ * This struct specifies the event to be enabled/disabled via an externally
++ * provided registry. It does not specify the registry to be used itself; this
++ * is done via &struct ssam_event_registry.
++ */
++struct ssam_event_id {
++	u8 target_category;
++	u8 instance;
++};
++
++/**
++ * enum ssam_event_mask - Flags specifying how events are matched to notifiers.
++ *
++ * @SSAM_EVENT_MASK_NONE:
++ *	Run the callback for any event with matching target category. Do not
++ *	do any additional filtering.
++ *
++ * @SSAM_EVENT_MASK_TARGET:
++ *	In addition to filtering by target category, only execute the notifier
++ *	callback for events with a target ID matching that of the
++ *	registry used for enabling/disabling the event.
++ *
++ * @SSAM_EVENT_MASK_INSTANCE:
++ *	In addition to filtering by target category, only execute the notifier
++ *	callback for events with an instance ID matching the instance ID
++ *	used when enabling the event.
++ *
++ * @SSAM_EVENT_MASK_STRICT:
++ *	Do all the filtering above.
++ */
++enum ssam_event_mask {
++	SSAM_EVENT_MASK_TARGET   = BIT(0),
++	SSAM_EVENT_MASK_INSTANCE = BIT(1),
++
++	SSAM_EVENT_MASK_NONE = 0,
++	SSAM_EVENT_MASK_STRICT =
++		  SSAM_EVENT_MASK_TARGET
++		| SSAM_EVENT_MASK_INSTANCE,
++};
++
++/**
++ * SSAM_EVENT_REGISTRY() - Define a new event registry.
++ * @tc:      Target category for the event registry requests.
++ * @tid:     Target ID for the event registry requests.
++ * @cid_en:  Command ID for the event-enable request.
++ * @cid_dis: Command ID for the event-disable request.
++ *
++ * Return: Returns the &struct ssam_event_registry specified by the given
++ * parameters.
++ */
++#define SSAM_EVENT_REGISTRY(tc, tid, cid_en, cid_dis)	\
++	((struct ssam_event_registry) {			\
++		.target_category = (tc),		\
++		.target_id = (tid),			\
++		.cid_enable = (cid_en),			\
++		.cid_disable = (cid_dis),		\
++	})
++
++#define SSAM_EVENT_REGISTRY_SAM	\
++	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
++
++#define SSAM_EVENT_REGISTRY_KIP	\
++	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
++
++#define SSAM_EVENT_REGISTRY_REG \
++	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
++
++/**
++ * struct ssam_event_notifier - Notifier block for SSAM events.
++ * @base:        The base notifier block with callback function and priority.
++ * @event:       The event for which this block will receive notifications.
++ * @event.reg:   Registry via which the event will be enabled/disabled.
++ * @event.id:    ID specifying the event.
++ * @event.mask:  Flags determining how events are matched to the notifier.
++ * @event.flags: Flags used for enabling the event.
++ */
++struct ssam_event_notifier {
++	struct ssam_notifier_block base;
++
++	struct {
++		struct ssam_event_registry reg;
++		struct ssam_event_id id;
++		enum ssam_event_mask mask;
++		u8 flags;
++	} event;
++};
++
++int ssam_notifier_register(struct ssam_controller *ctrl,
++			   struct ssam_event_notifier *n);
++
++int ssam_notifier_unregister(struct ssam_controller *ctrl,
++			     struct ssam_event_notifier *n);
++
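To actually receive events, a client fills in a &struct ssam_event_notifier (defined above) and registers it. The sketch below builds on the callback example further up; the choice of registry and target category is an assumption about the device, not something mandated by this header.

static struct ssam_event_notifier example_notifier;

static int example_enable_events(struct ssam_controller *ctrl)
{
	example_notifier.base.fn = example_notifier_fn;
	example_notifier.base.priority = 0;
	example_notifier.event.reg = SSAM_EVENT_REGISTRY_SAM;
	example_notifier.event.id.target_category = SSAM_SSH_TC_BAT;
	example_notifier.event.id.instance = 0x00;
	example_notifier.event.mask = SSAM_EVENT_MASK_STRICT;
	example_notifier.event.flags = SSAM_EVENT_SEQUENCED;

	/* Installs the callback; the controller enables the event as needed. */
	return ssam_notifier_register(ctrl, &example_notifier);
}

static void example_disable_events(struct ssam_controller *ctrl)
{
	ssam_notifier_unregister(ctrl, &example_notifier);
}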
++#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */
+diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
+new file mode 100644
+index 000000000000..64276fbfa1d5
+--- /dev/null
++++ b/include/linux/surface_aggregator/serial_hub.h
+@@ -0,0 +1,672 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Surface Serial Hub (SSH) protocol and communication interface.
++ *
++ * Lower-level communication layers and SSH protocol definitions for the
++ * Surface System Aggregator Module (SSAM). Provides the interface for basic
++ * packet- and request-based communication with the SSAM EC via SSH.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
++#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
++
++#include <linux/crc-ccitt.h>
++#include <linux/kref.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/types.h>
++
++
++/* -- Data structures for SAM-over-SSH communication. ----------------------- */
++
++/**
++ * enum ssh_frame_type - Frame types for SSH frames.
++ *
++ * @SSH_FRAME_TYPE_DATA_SEQ:
++ *	Indicates a data frame, followed by a payload with the length specified
++ *	in the ``struct ssh_frame.len`` field. This frame is sequenced, meaning
++ *	that an ACK is required.
++ *
++ * @SSH_FRAME_TYPE_DATA_NSQ:
++ *	Same as %SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, meaning that the
++ *	message does not have to be ACKed.
++ *
++ * @SSH_FRAME_TYPE_ACK:
++ *	Indicates an ACK message.
++ *
++ * @SSH_FRAME_TYPE_NAK:
++ *	Indicates an error response for previously sent frame. In general, this
++ *	Indicates an error response for a previously sent frame. In general, this
++ *	For command-type payloads, this can also mean that the command is
++ *	invalid.
++ */
++enum ssh_frame_type {
++	SSH_FRAME_TYPE_DATA_SEQ = 0x80,
++	SSH_FRAME_TYPE_DATA_NSQ = 0x00,
++	SSH_FRAME_TYPE_ACK      = 0x40,
++	SSH_FRAME_TYPE_NAK      = 0x04,
++};
++
++/**
++ * struct ssh_frame - SSH communication frame.
++ * @type: The type of the frame. See &enum ssh_frame_type.
++ * @len:  The length of the frame payload directly following the CRC for this
++ *        frame. Does not include the final CRC for that payload.
++ * @seq:  The sequence number for this message/exchange.
++ */
++struct ssh_frame {
++	u8 type;
++	__le16 len;
++	u8 seq;
++} __packed;
++
++static_assert(sizeof(struct ssh_frame) == 4);
++
++/*
++ * SSH_FRAME_MAX_PAYLOAD_SIZE - Maximum SSH frame payload length in bytes.
++ *
++ * This is the physical maximum length of the protocol. Implementations may
++ * set a more constrained limit.
++ */
++#define SSH_FRAME_MAX_PAYLOAD_SIZE	U16_MAX
++
++/**
++ * enum ssh_payload_type - Type indicator for the SSH payload.
++ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
++ *                    payload.
++ */
++enum ssh_payload_type {
++	SSH_PLD_TYPE_CMD = 0x80,
++};
++
++/**
++ * struct ssh_command - Payload of a command-type frame.
++ * @type:    The type of the payload. See &enum ssh_payload_type. Should be
++ *           SSH_PLD_TYPE_CMD for this struct.
++ * @tc:      Command target category.
++ * @tid_out: Output target ID. Should be zero if this is an incoming (EC to host)
++ *           message.
++ * @tid_in:  Input target ID. Should be zero if this is an outgoing (host to
++ *           EC) message.
++ * @iid:     Instance ID.
++ * @rqid:    Request ID. Used to match requests with responses and differentiate
++ *           between responses and events.
++ * @cid:     Command ID.
++ */
++struct ssh_command {
++	u8 type;
++	u8 tc;
++	u8 tid_out;
++	u8 tid_in;
++	u8 iid;
++	__le16 rqid;
++	u8 cid;
++} __packed;
++
++static_assert(sizeof(struct ssh_command) == 8);
++
++/*
++ * SSH_COMMAND_MAX_PAYLOAD_SIZE - Maximum SSH command payload length in bytes.
++ *
++ * This is the physical maximum length of the protocol. Implementations may
++ * set a more constrained limit.
++ */
++#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
++	(SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
++
++/*
++ * SSH_MSG_LEN_BASE - Base-length of a SSH message.
++ *
++ * This is the minimum number of bytes required to form a message. The actual
++ * message length is SSH_MSG_LEN_BASE plus the length of the frame payload.
++ */
++#define SSH_MSG_LEN_BASE	(sizeof(struct ssh_frame) + 3ull * sizeof(u16))
++
++/*
++ * SSH_MSG_LEN_CTRL - Length of a SSH control message.
++ *
++ * This is the length of a SSH control message, which is equal to a SSH
++ * message without any payload.
++ */
++#define SSH_MSG_LEN_CTRL	SSH_MSG_LEN_BASE
++
++/**
++ * SSH_MESSAGE_LENGTH() - Compute length of SSH message.
++ * @payload_size: Length of the payload inside the SSH frame.
++ *
++ * Return: Returns the length of a SSH message with payload of specified size.
++ */
++#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + (payload_size))
++
++/**
++ * SSH_COMMAND_MESSAGE_LENGTH() - Compute length of SSH command message.
++ * @payload_size: Length of the command payload.
++ *
++ * Return: Returns the length of a SSH command message with command payload of
++ * specified size.
++ */
++#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
++	SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + (payload_size))
++
++/**
++ * SSH_MSGOFFSET_FRAME() - Compute offset in SSH message to specified field in
++ * frame.
++ * @field: The field for which the offset should be computed.
++ *
++ * Return: Returns the offset of the specified &struct ssh_frame field in the
++ * raw SSH message data. Takes SYN bytes (u16) preceding the frame into
++ * account.
++ */
++#define SSH_MSGOFFSET_FRAME(field) \
++	(sizeof(u16) + offsetof(struct ssh_frame, field))
++
++/**
++ * SSH_MSGOFFSET_COMMAND() - Compute offset in SSH message to specified field
++ * in command.
++ * @field: The field for which the offset should be computed.
++ *
++ * Return: Returns the offset of the specified &struct ssh_command field in
++ * the raw SSH message data. Takes SYN bytes (u16) preceding the frame and the
++ * frame CRC (u16) between frame and command into account.
++ */
++#define SSH_MSGOFFSET_COMMAND(field) \
++	(2ull * sizeof(u16) + sizeof(struct ssh_frame) \
++		+ offsetof(struct ssh_command, field))
++
++/*
++ * SSH_MSG_SYN - SSH message synchronization (SYN) bytes as u16.
++ */
++#define SSH_MSG_SYN		((u16)0x55aa)
++
++/**
++ * ssh_crc() - Compute CRC for SSH messages.
++ * @buf: The pointer pointing to the data for which the CRC should be computed.
++ * @len: The length of the data for which the CRC should be computed.
++ *
++ * Return: Returns the CRC computed on the provided data, as used for SSH
++ * messages.
++ */
++static inline u16 ssh_crc(const u8 *buf, size_t len)
++{
++	return crc_ccitt_false(0xffff, buf, len);
++}
++
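To illustrate how the offset helpers and ssh_crc() relate, the sketch below validates the frame CRC of a raw message buffer. It assumes the buffer holds at least SSH_MSG_LEN_BASE bytes and that CRCs are stored little-endian, matching the __le16 fields used elsewhere in the protocol.

#include <asm/unaligned.h>

static bool example_frame_crc_ok(const u8 *msg)
{
	/* The frame starts directly after the two SYN bytes. */
	const u8 *frame = &msg[SSH_MSGOFFSET_FRAME(type)];

	/* The frame CRC directly follows the frame itself. */
	u16 crc = get_unaligned_le16(frame + sizeof(struct ssh_frame));

	return crc == ssh_crc(frame, sizeof(struct ssh_frame));
}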
++/*
++ * SSH_NUM_EVENTS - The number of reserved event IDs.
++ *
++ * The number of reserved event IDs, used for registering an SSH event
++ * handler. Valid event IDs are numbers below or equal to this value, with the
++ * exception of zero, which is not an event ID. Thus, this is also the
++ * absolute maximum number of event handlers that can be registered.
++ */
++#define SSH_NUM_EVENTS		34
++
++/*
++ * SSH_NUM_TARGETS - The number of communication targets used in the protocol.
++ */
++#define SSH_NUM_TARGETS		2
++
++/**
++ * ssh_rqid_next_valid() - Return the next valid request ID.
++ * @rqid: The current request ID.
++ *
++ * Return: Returns the next valid request ID, following the current request ID
++ * provided to this function. This function skips any request IDs reserved for
++ * events.
++ */
++static inline u16 ssh_rqid_next_valid(u16 rqid)
++{
++	return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
++}
++
++/**
++ * ssh_rqid_to_event() - Convert request ID to its corresponding event ID.
++ * @rqid: The request ID to convert.
++ */
++static inline u16 ssh_rqid_to_event(u16 rqid)
++{
++	return rqid - 1u;
++}
++
++/**
++ * ssh_rqid_is_event() - Check if given request ID is a valid event ID.
++ * @rqid: The request ID to check.
++ */
++static inline bool ssh_rqid_is_event(u16 rqid)
++{
++	return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
++}
++
++/**
++ * ssh_tc_to_rqid() - Convert target category to its corresponding request ID.
++ * @tc: The target category to convert.
++ */
++static inline u16 ssh_tc_to_rqid(u8 tc)
++{
++	return tc;
++}
++
++/**
++ * ssh_tid_to_index() - Convert target ID to its corresponding target index.
++ * @tid: The target ID to convert.
++ */
++static inline u8 ssh_tid_to_index(u8 tid)
++{
++	return tid - 1u;
++}
++
++/**
++ * ssh_tid_is_valid() - Check if target ID is valid/supported.
++ * @tid: The target ID to check.
++ */
++static inline bool ssh_tid_is_valid(u8 tid)
++{
++	return ssh_tid_to_index(tid) < SSH_NUM_TARGETS;
++}
++
++/**
++ * struct ssam_span - Reference to a buffer region.
++ * @ptr: Pointer to the buffer region.
++ * @len: Length of the buffer region.
++ *
++ * A reference to a (non-owned) buffer segment, consisting of pointer and
++ * length. Use of this struct indicates non-owned data, i.e. data of which the
++ * life-time is managed (i.e. it is allocated/freed) via another pointer.
++ */
++struct ssam_span {
++	u8    *ptr;
++	size_t len;
++};
++
++/*
++ * Known SSH/EC target categories.
++ *
++ * List of currently known target category values; "Known" as in we know they
++ * exist and are valid on at least some device/model. Detailed functionality
++ * or the full category name is only known for some of these categories and
++ * is detailed in the respective comment below.
++ *
++ * These values and abbreviations have been extracted from strings inside the
++ * Windows driver.
++ */
++enum ssam_ssh_tc {
++				/* Category 0x00 is invalid for EC use. */
++	SSAM_SSH_TC_SAM = 0x01,	/* Generic system functionality, real-time clock. */
++	SSAM_SSH_TC_BAT = 0x02,	/* Battery/power subsystem. */
++	SSAM_SSH_TC_TMP = 0x03,	/* Thermal subsystem. */
++	SSAM_SSH_TC_PMC = 0x04,
++	SSAM_SSH_TC_FAN = 0x05,
++	SSAM_SSH_TC_PoM = 0x06,
++	SSAM_SSH_TC_DBG = 0x07,
++	SSAM_SSH_TC_KBD = 0x08,	/* Legacy keyboard (Laptop 1/2). */
++	SSAM_SSH_TC_FWU = 0x09,
++	SSAM_SSH_TC_UNI = 0x0a,
++	SSAM_SSH_TC_LPC = 0x0b,
++	SSAM_SSH_TC_TCL = 0x0c,
++	SSAM_SSH_TC_SFL = 0x0d,
++	SSAM_SSH_TC_KIP = 0x0e,
++	SSAM_SSH_TC_EXT = 0x0f,
++	SSAM_SSH_TC_BLD = 0x10,
++	SSAM_SSH_TC_BAS = 0x11,	/* Detachment system (Surface Book 2/3). */
++	SSAM_SSH_TC_SEN = 0x12,
++	SSAM_SSH_TC_SRQ = 0x13,
++	SSAM_SSH_TC_MCU = 0x14,
++	SSAM_SSH_TC_HID = 0x15,	/* Generic HID input subsystem. */
++	SSAM_SSH_TC_TCH = 0x16,
++	SSAM_SSH_TC_BKL = 0x17,
++	SSAM_SSH_TC_TAM = 0x18,
++	SSAM_SSH_TC_ACC = 0x19,
++	SSAM_SSH_TC_UFI = 0x1a,
++	SSAM_SSH_TC_USC = 0x1b,
++	SSAM_SSH_TC_PEN = 0x1c,
++	SSAM_SSH_TC_VID = 0x1d,
++	SSAM_SSH_TC_AUD = 0x1e,
++	SSAM_SSH_TC_SMC = 0x1f,
++	SSAM_SSH_TC_KPD = 0x20,
++	SSAM_SSH_TC_REG = 0x21,	/* Extended event registry. */
++};
++
++
++/* -- Packet transport layer (ptl). ----------------------------------------- */
++
++/**
++ * enum ssh_packet_base_priority - Base priorities for &struct ssh_packet.
++ * @SSH_PACKET_PRIORITY_FLUSH: Base priority for flush packets.
++ * @SSH_PACKET_PRIORITY_DATA:  Base priority for normal data packets.
++ * @SSH_PACKET_PRIORITY_NAK:   Base priority for NAK packets.
++ * @SSH_PACKET_PRIORITY_ACK:   Base priority for ACK packets.
++ */
++enum ssh_packet_base_priority {
++	SSH_PACKET_PRIORITY_FLUSH = 0,	/* same as DATA to sequence flush */
++	SSH_PACKET_PRIORITY_DATA  = 0,
++	SSH_PACKET_PRIORITY_NAK   = 1,
++	SSH_PACKET_PRIORITY_ACK   = 2,
++};
++
++/*
++ * Same as SSH_PACKET_PRIORITY() below, only with actual values.
++ */
++#define __SSH_PACKET_PRIORITY(base, try) \
++	(((base) << 4) | ((try) & 0x0f))
++
++/**
++ * SSH_PACKET_PRIORITY() - Compute packet priority from base priority and
++ * number of tries.
++ * @base: The base priority as suffix of &enum ssh_packet_base_priority, e.g.
++ *        ``FLUSH``, ``DATA``, ``ACK``, or ``NAK``.
++ * @try:  The number of tries (must be less than 16).
++ *
++ * Compute the combined packet priority. The combined priority is dominated by
++ * the base priority, whereas the number of (re-)tries decides the precedence
++ * of packets with the same base priority, giving higher priority to packets
++ * that already have more tries.
++ *
++ * Return: Returns the computed priority as value fitting inside a &u8. A
++ * higher number means a higher priority.
++ */
++#define SSH_PACKET_PRIORITY(base, try) \
++	__SSH_PACKET_PRIORITY(SSH_PACKET_PRIORITY_##base, (try))
++
++/**
++ * ssh_packet_priority_get_try() - Get number of tries from packet priority.
++ * @priority: The packet priority.
++ *
++ * Return: Returns the number of tries encoded in the specified packet
++ * priority.
++ */
++static inline u8 ssh_packet_priority_get_try(u8 priority)
++{
++	return priority & 0x0f;
++}
++
++/**
++ * ssh_packet_priority_get_base() - Get base priority from packet priority.
++ * @priority: The packet priority.
++ *
++ * Return: Returns the base priority encoded in the given packet priority.
++ */
++static inline u8 ssh_packet_priority_get_base(u8 priority)
++{
++	return (priority & 0xf0) >> 4;
++}
++
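A small sketch of what this encoding implies for ordering: a data packet that has already been tried twice outranks a fresh data packet, but still ranks below any ACK.

static void example_priority_ordering(void)
{
	u8 retried = SSH_PACKET_PRIORITY(DATA, 2);
	u8 fresh   = SSH_PACKET_PRIORITY(DATA, 0);
	u8 ack     = SSH_PACKET_PRIORITY(ACK, 0);

	WARN_ON(ssh_packet_priority_get_base(retried) != SSH_PACKET_PRIORITY_DATA);
	WARN_ON(ssh_packet_priority_get_try(retried) != 2);
	WARN_ON(!(fresh < retried && retried < ack));
}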
++enum ssh_packet_flags {
++	/* state flags */
++	SSH_PACKET_SF_LOCKED_BIT,
++	SSH_PACKET_SF_QUEUED_BIT,
++	SSH_PACKET_SF_PENDING_BIT,
++	SSH_PACKET_SF_TRANSMITTING_BIT,
++	SSH_PACKET_SF_TRANSMITTED_BIT,
++	SSH_PACKET_SF_ACKED_BIT,
++	SSH_PACKET_SF_CANCELED_BIT,
++	SSH_PACKET_SF_COMPLETED_BIT,
++
++	/* type flags */
++	SSH_PACKET_TY_FLUSH_BIT,
++	SSH_PACKET_TY_SEQUENCED_BIT,
++	SSH_PACKET_TY_BLOCKING_BIT,
++
++	/* mask for state flags */
++	SSH_PACKET_FLAGS_SF_MASK =
++		  BIT(SSH_PACKET_SF_LOCKED_BIT)
++		| BIT(SSH_PACKET_SF_QUEUED_BIT)
++		| BIT(SSH_PACKET_SF_PENDING_BIT)
++		| BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
++		| BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
++		| BIT(SSH_PACKET_SF_ACKED_BIT)
++		| BIT(SSH_PACKET_SF_CANCELED_BIT)
++		| BIT(SSH_PACKET_SF_COMPLETED_BIT),
++
++	/* mask for type flags */
++	SSH_PACKET_FLAGS_TY_MASK =
++		  BIT(SSH_PACKET_TY_FLUSH_BIT)
++		| BIT(SSH_PACKET_TY_SEQUENCED_BIT)
++		| BIT(SSH_PACKET_TY_BLOCKING_BIT),
++};
++
++struct ssh_ptl;
++struct ssh_packet;
++
++/**
++ * struct ssh_packet_ops - Callback operations for a SSH packet.
++ * @release:  Function called when the packet reference count reaches zero.
++ *            This callback must be relied upon to ensure that the packet has
++ *            left the transport system(s).
++ * @complete: Function called when the packet is completed, either with
++ *            success or failure. In case of failure, the reason for the
++ *            failure is indicated by the value of the provided status code
++ *            argument. This value will be zero in case of success. Note that
++ *            a call to this callback does not guarantee that the packet is
++ *            not in use by the transport system any more.
++ */
++struct ssh_packet_ops {
++	void (*release)(struct ssh_packet *p);
++	void (*complete)(struct ssh_packet *p, int status);
++};
++
++/**
++ * struct ssh_packet - SSH transport packet.
++ * @ptl:      Pointer to the packet transport layer. May be %NULL if the packet
++ *            (or enclosing request) has not been submitted yet.
++ * @refcnt:   Reference count of the packet.
++ * @priority: Priority of the packet. Must be computed via
++ *            SSH_PACKET_PRIORITY(). Must only be accessed while holding the
++ *            queue lock after first submission.
++ * @data:     Raw message data.
++ * @data.len: Length of the raw message data.
++ * @data.ptr: Pointer to the raw message data buffer.
++ * @state:    State and type flags describing current packet state (dynamic)
++ *            and type (static). See &enum ssh_packet_flags for possible
++ *            options.
++ * @timestamp: Timestamp specifying when the latest transmission of a
++ *            currently pending packet has been started. May be %KTIME_MAX
++ *            before or in-between transmission attempts. Used for the packet
++ *            timeout implementation. Must only be accessed while holding the
++ *            pending lock after first submission.
++ * @queue_node:	The list node for the packet queue.
++ * @pending_node: The list node for the set of pending packets.
++ * @ops:      Packet operations.
++ */
++struct ssh_packet {
++	struct ssh_ptl *ptl;
++	struct kref refcnt;
++
++	u8 priority;
++
++	struct {
++		size_t len;
++		u8 *ptr;
++	} data;
++
++	unsigned long state;
++	ktime_t timestamp;
++
++	struct list_head queue_node;
++	struct list_head pending_node;
++
++	const struct ssh_packet_ops *ops;
++};
++
++struct ssh_packet *ssh_packet_get(struct ssh_packet *p);
++void ssh_packet_put(struct ssh_packet *p);
++
++/**
++ * ssh_packet_set_data() - Set raw message data of packet.
++ * @p:   The packet for which the message data should be set.
++ * @ptr: Pointer to the memory holding the message data.
++ * @len: Length of the message data.
++ *
++ * Sets the raw message data buffer of the packet to the provided memory. The
++ * memory is not copied. Instead, the caller is responsible for management
++ * (i.e. allocation and deallocation) of the memory. The caller must ensure
++ * that the provided memory is valid and contains a valid SSH message,
++ * starting from the time of submission of the packet until the ``release``
++ * callback has been called. During this time, the memory may not be altered
++ * in any way.
++ */
++static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
++{
++	p->data.ptr = ptr;
++	p->data.len = len;
++}
++
++
++/* -- Request transport layer (rtl). ---------------------------------------- */
++
++enum ssh_request_flags {
++	/* state flags */
++	SSH_REQUEST_SF_LOCKED_BIT,
++	SSH_REQUEST_SF_QUEUED_BIT,
++	SSH_REQUEST_SF_PENDING_BIT,
++	SSH_REQUEST_SF_TRANSMITTING_BIT,
++	SSH_REQUEST_SF_TRANSMITTED_BIT,
++	SSH_REQUEST_SF_RSPRCVD_BIT,
++	SSH_REQUEST_SF_CANCELED_BIT,
++	SSH_REQUEST_SF_COMPLETED_BIT,
++
++	/* type flags */
++	SSH_REQUEST_TY_FLUSH_BIT,
++	SSH_REQUEST_TY_HAS_RESPONSE_BIT,
++
++	/* mask for state flags */
++	SSH_REQUEST_FLAGS_SF_MASK =
++		  BIT(SSH_REQUEST_SF_LOCKED_BIT)
++		| BIT(SSH_REQUEST_SF_QUEUED_BIT)
++		| BIT(SSH_REQUEST_SF_PENDING_BIT)
++		| BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
++		| BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
++		| BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
++		| BIT(SSH_REQUEST_SF_CANCELED_BIT)
++		| BIT(SSH_REQUEST_SF_COMPLETED_BIT),
++
++	/* mask for type flags */
++	SSH_REQUEST_FLAGS_TY_MASK =
++		  BIT(SSH_REQUEST_TY_FLUSH_BIT)
++		| BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
++};
++
++struct ssh_rtl;
++struct ssh_request;
++
++/**
++ * struct ssh_request_ops - Callback operations for a SSH request.
++ * @release:  Function called when the request's reference count reaches zero.
++ *            This callback must be relied upon to ensure that the request has
++ *            left the transport systems (both packet and request systems).
++ * @complete: Function called when the request is completed, either with
++ *            success or failure. The command data for the request response
++ *            is provided via the &struct ssh_command parameter (``cmd``),
++ *            the command payload of the request response via the &struct
++ *            ssam_span parameter (``data``).
++ *
++ *            If the request does not have any response or has not been
++ *            completed with success, both ``cmd`` and ``data`` parameters will
++ *            be NULL. If the request response does not have any command
++ *            payload, the ``data`` span will be an empty (zero-length) span.
++ *
++ *            In case of failure, the reason for the failure is indicated by
++ *            the value of the provided status code argument (``status``). This
++ *            value will be zero in case of success and a regular errno
++ *            otherwise.
++ *
++ *            Note that a call to this callback does not guarantee that the
++ *            request is not in use by the transport systems any more.
++ */
++struct ssh_request_ops {
++	void (*release)(struct ssh_request *rqst);
++	void (*complete)(struct ssh_request *rqst,
++			 const struct ssh_command *cmd,
++			 const struct ssam_span *data, int status);
++};
++
++/**
++ * struct ssh_request - SSH transport request.
++ * @packet: The underlying SSH transport packet.
++ * @node:   List node for the request queue and pending set.
++ * @state:  State and type flags describing current request state (dynamic)
++ *          and type (static). See &enum ssh_request_flags for possible
++ *          options.
++ * @timestamp: Timestamp specifying when we start waiting on the response of
++ *          the request. This is set once the underlying packet has been
++ *          completed and may be %KTIME_MAX before that, or when the request
++ *          does not expect a response. Used for the request timeout
++ *          implementation.
++ * @ops:    Request Operations.
++ */
++struct ssh_request {
++	struct ssh_packet packet;
++	struct list_head node;
++
++	unsigned long state;
++	ktime_t timestamp;
++
++	const struct ssh_request_ops *ops;
++};
++
++/**
++ * to_ssh_request() - Cast a SSH packet to its enclosing SSH request.
++ * @p: The packet to cast.
++ *
++ * Casts the given &struct ssh_packet to its enclosing &struct ssh_request.
++ * The caller is responsible for making sure that the packet is actually
++ * wrapped in a &struct ssh_request.
++ *
++ * Return: Returns the &struct ssh_request wrapping the provided packet.
++ */
++static inline struct ssh_request *to_ssh_request(struct ssh_packet *p)
++{
++	return container_of(p, struct ssh_request, packet);
++}
++
++/**
++ * ssh_request_get() - Increment reference count of request.
++ * @r: The request to increment the reference count of.
++ *
++ * Increments the reference count of the given request by incrementing the
++ * reference count of the underlying &struct ssh_packet, enclosed in it.
++ *
++ * See also ssh_request_put(), ssh_packet_get().
++ *
++ * Return: Returns the request provided as input.
++ */
++static inline struct ssh_request *ssh_request_get(struct ssh_request *r)
++{
++	return r ? to_ssh_request(ssh_packet_get(&r->packet)) : NULL;
++}
++
++/**
++ * ssh_request_put() - Decrement reference count of request.
++ * @r: The request to decrement the reference count of.
++ *
++ * Decrements the reference count of the given request by decrementing the
++ * reference count of the underlying &struct ssh_packet, enclosed in it. If
++ * the reference count reaches zero, the ``release`` callback specified in the
++ * request's &struct ssh_request_ops, i.e. ``r->ops->release``, will be
++ * called.
++ *
++ * See also ssh_request_get(), ssh_packet_put().
++ */
++static inline void ssh_request_put(struct ssh_request *r)
++{
++	if (r)
++		ssh_packet_put(&r->packet);
++}
++
++/**
++ * ssh_request_set_data() - Set raw message data of request.
++ * @r:   The request for which the message data should be set.
++ * @ptr: Pointer to the memory holding the message data.
++ * @len: Length of the message data.
++ *
++ * Sets the raw message data buffer of the underlying packet to the specified
++ * buffer. Does not copy the actual message data, just sets the buffer pointer
++ * and length. Refer to ssh_packet_set_data() for more details.
++ */
++static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
++{
++	ssh_packet_set_data(&r->packet, ptr, len);
++}
++
++#endif /* _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H */
+-- 
+2.30.1
+
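Editor's note: the kernel-doc above describes the request lifecycle (attach a message buffer before submission, complete on response or failure, release once the last reference is dropped), but this excerpt contains no caller. The sketch below shows how a hypothetical user of the interface could embed a struct ssh_request; everything prefixed with "my_" is an illustrative assumption and not part of the patch.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/surface_aggregator/serial_hub.h>

/* Hypothetical wrapper: owns the raw message buffer for its request. */
struct my_request {
	struct ssh_request rqst;
	u8 buf[128];	/* backing storage for the serialized SSH message */
};

static void my_release(struct ssh_request *rqst)
{
	/*
	 * Reference count hit zero: per the documentation above, the request
	 * has left both the packet and request transport systems, so the
	 * enclosing object (including buf) may be freed safely.
	 */
	kfree(container_of(rqst, struct my_request, rqst));
}

static void my_complete(struct ssh_request *rqst,
			const struct ssh_command *cmd,
			const struct ssam_span *data, int status)
{
	/* cmd and data are NULL on failure or if no response is expected. */
	if (status)
		pr_warn("my_request: completed with error %d\n", status);
	else if (data && data->len)
		pr_info("my_request: %zu bytes of response payload\n",
			data->len);
}

static const struct ssh_request_ops my_request_ops = {
	.release  = my_release,
	.complete = my_complete,
};

A caller would serialize its SSH message into buf, attach it with ssh_request_set_data(&mr->rqst, mr->buf, msg_len), submit it through the request transport layer's ssh_rtl_submit(), and balance every ssh_request_get() with an ssh_request_put(); the final put is what ends up invoking my_release(). The request initialization helpers themselves are not shown in this excerpt.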
+From c27a9fdec42a6256b5a108771c1ccccb4a022129 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:52 +0100
+Subject: [PATCH] platform/surface: aggregator: Add control packet allocation
+ caching
+
+Surface Serial Hub communication is, at its core, packet based. Each
+sequenced packet needs to be acknowledged via an ACK-type control
+packet. In case invalid data has been received by the driver, a NAK-type
+(not-acknowledge/negative acknowledge) control packet is sent,
+triggering retransmission.
+
+Control packets are therefore a core communication primitive and used
+frequently enough (with every sequenced packet transmission sent by the
+embedded controller, including events and request responses) that it may
+warrant caching their allocations to reduce possible memory
+fragmentation.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20201221183959.1186143-3-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/aggregator/core.c    | 27 ++++++++++-
+ .../surface/aggregator/ssh_packet_layer.c     | 47 +++++++++++++++----
+ .../surface/aggregator/ssh_packet_layer.h     |  3 ++
+ 3 files changed, 67 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
+index 18e0e9e34e7b..60d312f71436 100644
+--- a/drivers/platform/surface/aggregator/core.c
++++ b/drivers/platform/surface/aggregator/core.c
+@@ -780,7 +780,32 @@ static struct serdev_device_driver ssam_serial_hub = {
+ 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ 	},
+ };
+-module_serdev_device_driver(ssam_serial_hub);
++
++
++/* -- Module setup. --------------------------------------------------------- */
++
++static int __init ssam_core_init(void)
++{
++	int status;
++
++	status = ssh_ctrl_packet_cache_init();
++	if (status)
++		return status;
++
++	status = serdev_device_driver_register(&ssam_serial_hub);
++	if (status)
++		ssh_ctrl_packet_cache_destroy();
++
++	return status;
++}
++module_init(ssam_core_init);
++
++static void __exit ssam_core_exit(void)
++{
++	serdev_device_driver_unregister(&ssam_serial_hub);
++	ssh_ctrl_packet_cache_destroy();
++}
++module_exit(ssam_core_exit);
+ 
+ MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+ MODULE_DESCRIPTION("Subsystem and Surface Serial Hub driver for Surface System Aggregator Module");
+diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+index 66e38fdc7963..23c2e31e7d0e 100644
+--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+@@ -303,24 +303,53 @@ void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
+ 	packet->ops = ops;
+ }
+ 
++static struct kmem_cache *ssh_ctrl_packet_cache;
++
++/**
++ * ssh_ctrl_packet_cache_init() - Initialize the control packet cache.
++ */
++int ssh_ctrl_packet_cache_init(void)
++{
++	const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
++	const unsigned int align = __alignof__(struct ssh_packet);
++	struct kmem_cache *cache;
++
++	cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
++	if (!cache)
++		return -ENOMEM;
++
++	ssh_ctrl_packet_cache = cache;
++	return 0;
++}
++
++/**
++ * ssh_ctrl_packet_cache_destroy() - Deinitialize the control packet cache.
++ */
++void ssh_ctrl_packet_cache_destroy(void)
++{
++	kmem_cache_destroy(ssh_ctrl_packet_cache);
++	ssh_ctrl_packet_cache = NULL;
++}
++
+ /**
+- * ssh_ctrl_packet_alloc() - Allocate control packet.
++ * ssh_ctrl_packet_alloc() - Allocate packet from control packet cache.
+  * @packet: Where the pointer to the newly allocated packet should be stored.
+  * @buffer: The buffer corresponding to this packet.
+  * @flags:  Flags used for allocation.
+  *
+- * Allocates a packet and corresponding transport buffer. Sets the packet's
+- * buffer reference to the allocated buffer. The packet must be freed via
+- * ssh_ctrl_packet_free(), which will also free the corresponding buffer. The
+- * corresponding buffer must not be freed separately. Intended to be used with
+- * %ssh_ptl_ctrl_packet_ops as packet operations.
++ * Allocates a packet and corresponding transport buffer from the control
++ * packet cache. Sets the packet's buffer reference to the allocated buffer.
++ * The packet must be freed via ssh_ctrl_packet_free(), which will also free
++ * the corresponding buffer. The corresponding buffer must not be freed
++ * separately. Intended to be used with %ssh_ptl_ctrl_packet_ops as packet
++ * operations.
+  *
+  * Return: Returns zero on success, %-ENOMEM if the allocation failed.
+  */
+ static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
+ 				 struct ssam_span *buffer, gfp_t flags)
+ {
+-	*packet = kzalloc(sizeof(**packet) + SSH_MSG_LEN_CTRL, flags);
++	*packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
+ 	if (!*packet)
+ 		return -ENOMEM;
+ 
+@@ -331,12 +360,12 @@ static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
+ }
+ 
+ /**
+- * ssh_ctrl_packet_free() - Free control packet.
++ * ssh_ctrl_packet_free() - Free packet allocated from control packet cache.
+  * @p: The packet to free.
+  */
+ static void ssh_ctrl_packet_free(struct ssh_packet *p)
+ {
+-	kfree(p);
++	kmem_cache_free(ssh_ctrl_packet_cache, p);
+ }
+ 
+ static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
+diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
+index 058f111292ca..e8757d03f279 100644
+--- a/drivers/platform/surface/aggregator/ssh_packet_layer.h
++++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
+@@ -184,4 +184,7 @@ static inline void ssh_ptl_tx_wakeup_transfer(struct ssh_ptl *ptl)
+ void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
+ 		     u8 priority, const struct ssh_packet_ops *ops);
+ 
++int ssh_ctrl_packet_cache_init(void);
++void ssh_ctrl_packet_cache_destroy(void);
++
+ #endif /* _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H */
+-- 
+2.30.1
+
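Editor's note: the cache introduced above bundles the packet header and its fixed-size transport buffer into a single slab object: the object size passed to kmem_cache_create() is sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL, and ssh_ctrl_packet_alloc() simply points the buffer at (*packet + 1). Reduced to a self-contained sketch (all "my_" names are illustrative assumptions, not driver code), the pattern is:

#include <linux/slab.h>
#include <linux/types.h>

#define MY_BUF_LEN 64

struct my_obj {
	size_t len;
	/* MY_BUF_LEN bytes of buffer space follow the struct inside the
	 * same cache object; no separate allocation is needed.
	 */
};

static struct kmem_cache *my_cache;

static int my_cache_init(void)
{
	my_cache = kmem_cache_create("my_obj_cache",
				     sizeof(struct my_obj) + MY_BUF_LEN,
				     __alignof__(struct my_obj), 0, NULL);
	return my_cache ? 0 : -ENOMEM;
}

static struct my_obj *my_obj_alloc(gfp_t flags)
{
	struct my_obj *obj = kmem_cache_zalloc(my_cache, flags);

	if (obj)
		obj->len = MY_BUF_LEN;
	return obj;
}

static u8 *my_obj_buf(struct my_obj *obj)
{
	return (u8 *)(obj + 1);	/* buffer starts right after the header */
}

static void my_obj_free(struct my_obj *obj)
{
	kmem_cache_free(my_cache, obj);
}

static void my_cache_destroy(void)
{
	kmem_cache_destroy(my_cache);
	my_cache = NULL;
}

The init/exit ordering in ssam_core_init()/ssam_core_exit() follows directly from this layout: the cache has to exist before the serdev driver can allocate from it, and may only be destroyed once the driver has been unregistered.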
+From dfa18cda7bcd165f010296f86b979d8184c7c424 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:53 +0100
+Subject: [PATCH] platform/surface: aggregator: Add event item allocation
+ caching
+
+Event items are used for completing Surface Aggregator EC events, i.e.
+placing event command data and payload on a workqueue for later
+processing to avoid doing said processing directly on the receiver
+thread. This means that event items are allocated for each incoming
+event, regardless of that event being transmitted via sequenced or
+unsequenced packets.
+
+On the Surface Book 3 and Surface Laptop 3, touchpad HID input events
+(unsequenced) can constitute a larger amount of traffic, and therefore
+allocation of event items. This warrants caching event items to reduce
+memory fragmentation. The size of the cached objects is specifically
+tuned to accommodate keyboard and touchpad input events and their
+payloads on those devices. As a result, this effectively also covers
+most other event types. In case of a larger event payload, event item
+allocation will fall back to kzalloc().
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20201221183959.1186143-4-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ .../platform/surface/aggregator/controller.c  | 86 +++++++++++++++++--
+ .../platform/surface/aggregator/controller.h  |  9 ++
+ drivers/platform/surface/aggregator/core.c    | 16 +++-
+ 3 files changed, 101 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index 488318cf2098..775a4509bece 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -513,14 +513,74 @@ static void ssam_nf_destroy(struct ssam_nf *nf)
+  */
+ #define SSAM_CPLT_WQ_BATCH	10
+ 
++/*
++ * SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN - Maximum payload length for a cached
++ * &struct ssam_event_item.
++ *
++ * This length has been chosen to accommodate standard touchpad and
++ * keyboard input events. Events with larger payloads will be allocated
++ * separately.
++ */
++#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN	32
++
++static struct kmem_cache *ssam_event_item_cache;
++
++/**
++ * ssam_event_item_cache_init() - Initialize the event item cache.
++ */
++int ssam_event_item_cache_init(void)
++{
++	const unsigned int size = sizeof(struct ssam_event_item)
++				  + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
++	const unsigned int align = __alignof__(struct ssam_event_item);
++	struct kmem_cache *cache;
++
++	cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
++	if (!cache)
++		return -ENOMEM;
++
++	ssam_event_item_cache = cache;
++	return 0;
++}
++
++/**
++ * ssam_event_item_cache_destroy() - Deinitialize the event item cache.
++ */
++void ssam_event_item_cache_destroy(void)
++{
++	kmem_cache_destroy(ssam_event_item_cache);
++	ssam_event_item_cache = NULL;
++}
++
++static void __ssam_event_item_free_cached(struct ssam_event_item *item)
++{
++	kmem_cache_free(ssam_event_item_cache, item);
++}
++
++static void __ssam_event_item_free_generic(struct ssam_event_item *item)
++{
++	kfree(item);
++}
++
++/**
++ * ssam_event_item_free() - Free the provided event item.
++ * @item: The event item to free.
++ */
++static void ssam_event_item_free(struct ssam_event_item *item)
++{
++	item->ops.free(item);
++}
++
+ /**
+  * ssam_event_item_alloc() - Allocate an event item with the given payload size.
+  * @len:   The event payload length.
+  * @flags: The flags used for allocation.
+  *
+- * Allocate an event item with the given payload size. Sets the item
+- * operations and payload length values. The item free callback (``ops.free``)
+- * should not be overwritten after this call.
++ * Allocate an event item with the given payload size, preferring allocation
++ * from the event item cache if the payload is small enough (i.e. not larger
++ * than %SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN). Sets the item operations and
++ * payload length values. The item free callback (``ops.free``) should not be
++ * overwritten after this call.
+  *
+  * Return: Returns the newly allocated event item.
+  */
+@@ -528,9 +588,19 @@ static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
+ {
+ 	struct ssam_event_item *item;
+ 
+-	item = kzalloc(struct_size(item, event.data, len), flags);
+-	if (!item)
+-		return NULL;
++	if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
++		item = kmem_cache_alloc(ssam_event_item_cache, flags);
++		if (!item)
++			return NULL;
++
++		item->ops.free = __ssam_event_item_free_cached;
++	} else {
++		item = kzalloc(struct_size(item, event.data, len), flags);
++		if (!item)
++			return NULL;
++
++		item->ops.free = __ssam_event_item_free_generic;
++	}
+ 
+ 	item->event.length = len;
+ 	return item;
+@@ -692,7 +762,7 @@ static void ssam_event_queue_work_fn(struct work_struct *work)
+ 			return;
+ 
+ 		ssam_nf_call(nf, dev, item->rqid, &item->event);
+-		kfree(item);
++		ssam_event_item_free(item);
+ 	} while (--iterations);
+ 
+ 	if (!ssam_event_queue_is_empty(queue))
+@@ -900,7 +970,7 @@ static void ssam_handle_event(struct ssh_rtl *rtl,
+ 	memcpy(&item->event.data[0], data->ptr, data->len);
+ 
+ 	if (WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item)))
+-		kfree(item);
++		ssam_event_item_free(item);
+ }
+ 
+ static const struct ssh_rtl_ops ssam_rtl_ops = {
+diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
+index 5ee9e966f1d7..8297d34e7489 100644
+--- a/drivers/platform/surface/aggregator/controller.h
++++ b/drivers/platform/surface/aggregator/controller.h
+@@ -80,12 +80,18 @@ struct ssam_cplt;
+  * struct ssam_event_item - Struct for event queuing and completion.
+  * @node:     The node in the queue.
+  * @rqid:     The request ID of the event.
++ * @ops:      Instance specific functions.
++ * @ops.free: Callback for freeing this event item.
+  * @event:    Actual event data.
+  */
+ struct ssam_event_item {
+ 	struct list_head node;
+ 	u16 rqid;
+ 
++	struct {
++		void (*free)(struct ssam_event_item *event);
++	} ops;
++
+ 	struct ssam_event event;	/* must be last */
+ };
+ 
+@@ -273,4 +279,7 @@ int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl);
+ int ssam_controller_suspend(struct ssam_controller *ctrl);
+ int ssam_controller_resume(struct ssam_controller *ctrl);
+ 
++int ssam_event_item_cache_init(void);
++void ssam_event_item_cache_destroy(void);
++
+ #endif /* _SURFACE_AGGREGATOR_CONTROLLER_H */
+diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
+index 60d312f71436..37593234fb31 100644
+--- a/drivers/platform/surface/aggregator/core.c
++++ b/drivers/platform/surface/aggregator/core.c
+@@ -790,12 +790,23 @@ static int __init ssam_core_init(void)
+ 
+ 	status = ssh_ctrl_packet_cache_init();
+ 	if (status)
+-		return status;
++		goto err_cpkg;
++
++	status = ssam_event_item_cache_init();
++	if (status)
++		goto err_evitem;
+ 
+ 	status = serdev_device_driver_register(&ssam_serial_hub);
+ 	if (status)
+-		ssh_ctrl_packet_cache_destroy();
++		goto err_register;
+ 
++	return 0;
++
++err_register:
++	ssam_event_item_cache_destroy();
++err_evitem:
++	ssh_ctrl_packet_cache_destroy();
++err_cpkg:
+ 	return status;
+ }
+ module_init(ssam_core_init);
+@@ -803,6 +814,7 @@ module_init(ssam_core_init);
+ static void __exit ssam_core_exit(void)
+ {
+ 	serdev_device_driver_unregister(&ssam_serial_hub);
++	ssam_event_item_cache_destroy();
+ 	ssh_ctrl_packet_cache_destroy();
+ }
+ module_exit(ssam_core_exit);
+-- 
+2.30.1
+
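Editor's note: the allocation helper above mixes two strategies behind a single free path: payloads up to SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN come from a dedicated slab cache, larger ones fall back to kzalloc(), and ops.free records which path was taken so that freeing stays uniform. Stripped of the driver specifics, the pattern looks roughly like the sketch below; every "my_" identifier is an illustrative assumption, and the cache itself would be created with an object size of sizeof(struct my_item) + MY_CACHE_PAYLOAD_LEN, just like the control packet cache in the previous commit.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

#define MY_CACHE_PAYLOAD_LEN 32

struct my_item {
	void (*free)(struct my_item *item);
	size_t len;
	u8 data[];		/* payload, must be last */
};

static struct kmem_cache *my_item_cache;

static void my_item_free_cached(struct my_item *item)
{
	kmem_cache_free(my_item_cache, item);
}

static void my_item_free_generic(struct my_item *item)
{
	kfree(item);
}

static struct my_item *my_item_alloc(size_t len, gfp_t flags)
{
	struct my_item *item;

	if (len <= MY_CACHE_PAYLOAD_LEN) {
		item = kmem_cache_alloc(my_item_cache, flags);
		if (!item)
			return NULL;
		item->free = my_item_free_cached;
	} else {
		item = kzalloc(struct_size(item, data, len), flags);
		if (!item)
			return NULL;
		item->free = my_item_free_generic;
	}

	item->len = len;
	return item;
}

static void my_item_free(struct my_item *item)
{
	item->free(item);	/* dispatch to the matching free path */
}

Note that the cached path skips zeroing on purpose; as in the patch, the caller is expected to copy the payload into the item right after allocation.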
+From 30c51ea3b67b30af16a7f79075ed72af3ed64449 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:54 +0100
+Subject: [PATCH] platform/surface: aggregator: Add trace points
+
+Add trace points to the Surface Aggregator subsystem core. These trace
+points can be used to track packets, requests, and allocations. They are
+further intended for debugging and testing/validation, specifically in
+combination with the error injection capabilities introduced in the
+subsequent commit.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Link: https://lore.kernel.org/r/20201221183959.1186143-5-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/aggregator/Makefile  |   3 +
+ .../platform/surface/aggregator/controller.c  |   5 +
+ drivers/platform/surface/aggregator/core.c    |   3 +
+ .../surface/aggregator/ssh_packet_layer.c     |  26 +-
+ .../surface/aggregator/ssh_request_layer.c    |  18 +
+ drivers/platform/surface/aggregator/trace.h   | 601 ++++++++++++++++++
+ 6 files changed, 655 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/platform/surface/aggregator/trace.h
+
+diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
+index faad18d4a7f2..b8b24c8ec310 100644
+--- a/drivers/platform/surface/aggregator/Makefile
++++ b/drivers/platform/surface/aggregator/Makefile
+@@ -1,6 +1,9 @@
+ # SPDX-License-Identifier: GPL-2.0+
+ # Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ 
++# For include/trace/define_trace.h to include trace.h
++CFLAGS_core.o = -I$(src)
++
+ obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
+ 
+ surface_aggregator-objs := core.o
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index 775a4509bece..5bcb59ed579d 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -32,6 +32,8 @@
+ #include "ssh_msgb.h"
+ #include "ssh_request_layer.h"
+ 
++#include "trace.h"
++
+ 
+ /* -- Safe counters. -------------------------------------------------------- */
+ 
+@@ -568,6 +570,7 @@ static void __ssam_event_item_free_generic(struct ssam_event_item *item)
+  */
+ static void ssam_event_item_free(struct ssam_event_item *item)
+ {
++	trace_ssam_event_item_free(item);
+ 	item->ops.free(item);
+ }
+ 
+@@ -603,6 +606,8 @@ static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
+ 	}
+ 
+ 	item->event.length = len;
++
++	trace_ssam_event_item_alloc(item, len);
+ 	return item;
+ }
+ 
+diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
+index 37593234fb31..b6a9dea53592 100644
+--- a/drivers/platform/surface/aggregator/core.c
++++ b/drivers/platform/surface/aggregator/core.c
+@@ -24,6 +24,9 @@
+ #include <linux/surface_aggregator/controller.h>
+ #include "controller.h"
+ 
++#define CREATE_TRACE_POINTS
++#include "trace.h"
++
+ 
+ /* -- Static controller reference. ------------------------------------------ */
+ 
+diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+index 23c2e31e7d0e..c4f082e57372 100644
+--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+@@ -26,6 +26,8 @@
+ #include "ssh_packet_layer.h"
+ #include "ssh_parser.h"
+ 
++#include "trace.h"
++
+ /*
+  * To simplify reasoning about the code below, we define a few concepts. The
+  * system below is similar to a state-machine for packets, however, there are
+@@ -228,6 +230,8 @@ static void __ssh_ptl_packet_release(struct kref *kref)
+ {
+ 	struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
+ 
++	trace_ssam_packet_release(p);
++
+ 	ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
+ 	p->ops->release(p);
+ }
+@@ -356,6 +360,7 @@ static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
+ 	buffer->ptr = (u8 *)(*packet + 1);
+ 	buffer->len = SSH_MSG_LEN_CTRL;
+ 
++	trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
+ 	return 0;
+ }
+ 
+@@ -365,6 +370,7 @@ static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
+  */
+ static void ssh_ctrl_packet_free(struct ssh_packet *p)
+ {
++	trace_ssam_ctrl_packet_free(p);
+ 	kmem_cache_free(ssh_ctrl_packet_cache, p);
+ }
+ 
+@@ -398,7 +404,12 @@ static void ssh_packet_next_try(struct ssh_packet *p)
+ 
+ 	lockdep_assert_held(&p->ptl->queue.lock);
+ 
+-	p->priority = __SSH_PACKET_PRIORITY(base, try + 1);
++	/*
++	 * Ensure that we write the priority in one go via WRITE_ONCE() so we
++	 * can access it via READ_ONCE() for tracing. Note that other access
++	 * is guarded by the queue lock, so no need to use READ_ONCE() there.
++	 */
++	WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
+ }
+ 
+ /* Must be called with queue lock held. */
+@@ -560,6 +571,7 @@ static void __ssh_ptl_complete(struct ssh_packet *p, int status)
+ {
+ 	struct ssh_ptl *ptl = READ_ONCE(p->ptl);
+ 
++	trace_ssam_packet_complete(p, status);
+ 	ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
+ 
+ 	if (p->ops->complete)
+@@ -1014,6 +1026,8 @@ int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
+ 	struct ssh_ptl *ptl_old;
+ 	int status;
+ 
++	trace_ssam_packet_submit(p);
++
+ 	/* Validate packet fields. */
+ 	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
+ 		if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
+@@ -1065,6 +1079,8 @@ static int __ssh_ptl_resubmit(struct ssh_packet *packet)
+ 
+ 	lockdep_assert_held(&packet->ptl->pending.lock);
+ 
++	trace_ssam_packet_resubmit(packet);
++
+ 	spin_lock(&packet->ptl->queue.lock);
+ 
+ 	/* Check if the packet is out of tries. */
+@@ -1148,6 +1164,8 @@ void ssh_ptl_cancel(struct ssh_packet *p)
+ 	if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
+ 		return;
+ 
++	trace_ssam_packet_cancel(p);
++
+ 	/*
+ 	 * Lock packet and commit with memory barrier. If this packet has
+ 	 * already been locked, it's going to be removed and completed by
+@@ -1202,6 +1220,8 @@ static void ssh_ptl_timeout_reap(struct work_struct *work)
+ 	bool resub = false;
+ 	int status;
+ 
++	trace_ssam_ptl_timeout_reap(atomic_read(&ptl->pending.count));
++
+ 	/*
+ 	 * Mark reaper as "not pending". This is done before checking any
+ 	 * packets to avoid lost-update type problems.
+@@ -1224,6 +1244,8 @@ static void ssh_ptl_timeout_reap(struct work_struct *work)
+ 			continue;
+ 		}
+ 
++		trace_ssam_packet_timeout(p);
++
+ 		status = __ssh_ptl_resubmit(p);
+ 
+ 		/*
+@@ -1416,6 +1438,8 @@ static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
+ 	if (!frame)	/* Not enough data. */
+ 		return aligned.ptr - source->ptr;
+ 
++	trace_ssam_rx_frame_received(frame);
++
+ 	switch (frame->type) {
+ 	case SSH_FRAME_TYPE_ACK:
+ 		ssh_ptl_acknowledge(ptl, frame->seq);
+diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
+index 66c839a995f3..b649d71840fd 100644
+--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
+@@ -22,6 +22,8 @@
+ #include "ssh_packet_layer.h"
+ #include "ssh_request_layer.h"
+ 
++#include "trace.h"
++
+ /*
+  * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
+  *
+@@ -144,6 +146,8 @@ static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
+ {
+ 	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+ 
++	trace_ssam_request_complete(rqst, status);
++
+ 	/* rtl/ptl may not be set if we're canceling before submitting. */
+ 	rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
+ 		     ssh_request_get_rqid_safe(rqst), status);
+@@ -157,6 +161,8 @@ static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
+ {
+ 	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+ 
++	trace_ssam_request_complete(rqst, 0);
++
+ 	rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
+ 		ssh_request_get_rqid(rqst));
+ 
+@@ -329,6 +335,8 @@ static void ssh_rtl_tx_work_fn(struct work_struct *work)
+  */
+ int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
+ {
++	trace_ssam_request_submit(rqst);
++
+ 	/*
+ 	 * Ensure that requests expecting a response are sequenced. If this
+ 	 * invariant ever changes, see the comment in ssh_rtl_complete() on what
+@@ -439,6 +447,8 @@ static void ssh_rtl_complete(struct ssh_rtl *rtl,
+ 	struct ssh_request *p, *n;
+ 	u16 rqid = get_unaligned_le16(&command->rqid);
+ 
++	trace_ssam_rx_response_received(command, command_data->len);
++
+ 	/*
+ 	 * Get request from pending based on request ID and mark it as response
+ 	 * received and locked.
+@@ -688,6 +698,8 @@ bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
+ 	if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
+ 		return true;
+ 
++	trace_ssam_request_cancel(rqst);
++
+ 	if (pending)
+ 		canceled = ssh_rtl_cancel_pending(rqst);
+ 	else
+@@ -779,6 +791,8 @@ static void ssh_rtl_timeout_reap(struct work_struct *work)
+ 	ktime_t timeout = rtl->rtx_timeout.timeout;
+ 	ktime_t next = KTIME_MAX;
+ 
++	trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));
++
+ 	/*
+ 	 * Mark reaper as "not pending". This is done before checking any
+ 	 * requests to avoid lost-update type problems.
+@@ -822,6 +836,8 @@ static void ssh_rtl_timeout_reap(struct work_struct *work)
+ 
+ 	/* Cancel and complete the request. */
+ 	list_for_each_entry_safe(r, n, &claimed, node) {
++		trace_ssam_request_timeout(r);
++
+ 		/*
+ 		 * At this point we've removed the packet from pending. This
+ 		 * means that we've obtained the last (only) reference of the
+@@ -849,6 +865,8 @@ static void ssh_rtl_timeout_reap(struct work_struct *work)
+ static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
+ 			     const struct ssam_span *data)
+ {
++	trace_ssam_rx_event_received(cmd, data->len);
++
+ 	rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
+ 		get_unaligned_le16(&cmd->rqid));
+ 
+diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
+new file mode 100644
+index 000000000000..dcca8007d876
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/trace.h
+@@ -0,0 +1,601 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Trace points for SSAM/SSH.
++ *
++ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM surface_aggregator
++
++#if !defined(_SURFACE_AGGREGATOR_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _SURFACE_AGGREGATOR_TRACE_H
++
++#include <linux/surface_aggregator/serial_hub.h>
++
++#include <asm/unaligned.h>
++#include <linux/tracepoint.h>
++
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
++TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
++
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT);
++
++TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
++
++TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK);
++TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK);
++
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
++
++#define SSAM_PTR_UID_LEN		9
++#define SSAM_U8_FIELD_NOT_APPLICABLE	((u16)-1)
++#define SSAM_SEQ_NOT_APPLICABLE		((u16)-1)
++#define SSAM_RQID_NOT_APPLICABLE	((u32)-1)
++#define SSAM_SSH_TC_NOT_APPLICABLE	0
++
++#ifndef _SURFACE_AGGREGATOR_TRACE_HELPERS
++#define _SURFACE_AGGREGATOR_TRACE_HELPERS
++
++/**
++ * ssam_trace_ptr_uid() - Convert the pointer to a non-pointer UID string.
++ * @ptr: The pointer to convert.
++ * @uid_str: A buffer of length SSAM_PTR_UID_LEN where the UID will be stored.
++ *
++ * Converts the given pointer into a UID string that is safe to be shared
++ * with userspace and logs, i.e. doesn't give away the real memory location.
++ */
++static inline void ssam_trace_ptr_uid(const void *ptr, char *uid_str)
++{
++	char buf[2 * sizeof(void *) + 1];
++
++	BUILD_BUG_ON(ARRAY_SIZE(buf) < SSAM_PTR_UID_LEN);
++
++	snprintf(buf, ARRAY_SIZE(buf), "%p", ptr);
++	memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN],
++	       SSAM_PTR_UID_LEN);
++}
++
++/**
++ * ssam_trace_get_packet_seq() - Read the packet's sequence ID.
++ * @p: The packet.
++ *
++ * Return: Returns the packet's sequence ID (SEQ) field if present, or
++ * %SSAM_SEQ_NOT_APPLICABLE if not (e.g. flush packet).
++ */
++static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p)
++{
++	if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0))
++		return SSAM_SEQ_NOT_APPLICABLE;
++
++	return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
++}
++
++/**
++ * ssam_trace_get_request_id() - Read the packet's request ID.
++ * @p: The packet.
++ *
++ * Return: Returns the packet's request ID (RQID) field if the packet
++ * represents a request with command data, or %SSAM_RQID_NOT_APPLICABLE if not
++ * (e.g. flush request, control packet).
++ */
++static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
++{
++	if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
++		return SSAM_RQID_NOT_APPLICABLE;
++
++	return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]);
++}
++
++/**
++ * ssam_trace_get_request_tc() - Read the packet's request target category.
++ * @p: The packet.
++ *
++ * Return: Returns the packet's request target category (TC) field if the
++ * packet represents a request with command data, or %SSAM_SSH_TC_NOT_APPLICABLE
++ * if not (e.g. flush request, control packet).
++ */
++static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
++{
++	if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
++		return SSAM_SSH_TC_NOT_APPLICABLE;
++
++	return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
++}
++
++#endif /* _SURFACE_AGGREGATOR_TRACE_HELPERS */
++
++#define ssam_trace_get_command_field_u8(packet, field) \
++	((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
++	 ? 0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
++
++#define ssam_show_generic_u8_field(value)				\
++	__print_symbolic(value,						\
++		{ SSAM_U8_FIELD_NOT_APPLICABLE,		"N/A" }		\
++	)
++
++#define ssam_show_frame_type(ty)					\
++	__print_symbolic(ty,						\
++		{ SSH_FRAME_TYPE_DATA_SEQ,		"DSEQ" },	\
++		{ SSH_FRAME_TYPE_DATA_NSQ,		"DNSQ" },	\
++		{ SSH_FRAME_TYPE_ACK,			"ACK"  },	\
++		{ SSH_FRAME_TYPE_NAK,			"NAK"  }	\
++	)
++
++#define ssam_show_packet_type(type)					\
++	__print_flags((type) & SSH_PACKET_FLAGS_TY_MASK, "",		\
++		{ BIT(SSH_PACKET_TY_FLUSH_BIT),		"F" },		\
++		{ BIT(SSH_PACKET_TY_SEQUENCED_BIT),	"S" },		\
++		{ BIT(SSH_PACKET_TY_BLOCKING_BIT),	"B" }		\
++	)
++
++#define ssam_show_packet_state(state)					\
++	__print_flags((state) & SSH_PACKET_FLAGS_SF_MASK, "",		\
++		{ BIT(SSH_PACKET_SF_LOCKED_BIT),	"L" },		\
++		{ BIT(SSH_PACKET_SF_QUEUED_BIT),	"Q" },		\
++		{ BIT(SSH_PACKET_SF_PENDING_BIT),	"P" },		\
++		{ BIT(SSH_PACKET_SF_TRANSMITTING_BIT),	"S" },		\
++		{ BIT(SSH_PACKET_SF_TRANSMITTED_BIT),	"T" },		\
++		{ BIT(SSH_PACKET_SF_ACKED_BIT),		"A" },		\
++		{ BIT(SSH_PACKET_SF_CANCELED_BIT),	"C" },		\
++		{ BIT(SSH_PACKET_SF_COMPLETED_BIT),	"F" }		\
++	)
++
++#define ssam_show_packet_seq(seq)					\
++	__print_symbolic(seq,						\
++		{ SSAM_SEQ_NOT_APPLICABLE,		"N/A" }		\
++	)
++
++#define ssam_show_request_type(flags)					\
++	__print_flags((flags) & SSH_REQUEST_FLAGS_TY_MASK, "",		\
++		{ BIT(SSH_REQUEST_TY_FLUSH_BIT),	"F" },		\
++		{ BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),	"R" }		\
++	)
++
++#define ssam_show_request_state(flags)					\
++	__print_flags((flags) & SSH_REQUEST_FLAGS_SF_MASK, "",		\
++		{ BIT(SSH_REQUEST_SF_LOCKED_BIT),	"L" },		\
++		{ BIT(SSH_REQUEST_SF_QUEUED_BIT),	"Q" },		\
++		{ BIT(SSH_REQUEST_SF_PENDING_BIT),	"P" },		\
++		{ BIT(SSH_REQUEST_SF_TRANSMITTING_BIT),	"S" },		\
++		{ BIT(SSH_REQUEST_SF_TRANSMITTED_BIT),	"T" },		\
++		{ BIT(SSH_REQUEST_SF_RSPRCVD_BIT),	"A" },		\
++		{ BIT(SSH_REQUEST_SF_CANCELED_BIT),	"C" },		\
++		{ BIT(SSH_REQUEST_SF_COMPLETED_BIT),	"F" }		\
++	)
++
++#define ssam_show_request_id(rqid)					\
++	__print_symbolic(rqid,						\
++		{ SSAM_RQID_NOT_APPLICABLE,		"N/A" }		\
++	)
++
++#define ssam_show_ssh_tc(rqid)						\
++	__print_symbolic(rqid,						\
++		{ SSAM_SSH_TC_NOT_APPLICABLE,		"N/A" },	\
++		{ SSAM_SSH_TC_SAM,			"SAM" },	\
++		{ SSAM_SSH_TC_BAT,			"BAT" },	\
++		{ SSAM_SSH_TC_TMP,			"TMP" },	\
++		{ SSAM_SSH_TC_PMC,			"PMC" },	\
++		{ SSAM_SSH_TC_FAN,			"FAN" },	\
++		{ SSAM_SSH_TC_PoM,			"PoM" },	\
++		{ SSAM_SSH_TC_DBG,			"DBG" },	\
++		{ SSAM_SSH_TC_KBD,			"KBD" },	\
++		{ SSAM_SSH_TC_FWU,			"FWU" },	\
++		{ SSAM_SSH_TC_UNI,			"UNI" },	\
++		{ SSAM_SSH_TC_LPC,			"LPC" },	\
++		{ SSAM_SSH_TC_TCL,			"TCL" },	\
++		{ SSAM_SSH_TC_SFL,			"SFL" },	\
++		{ SSAM_SSH_TC_KIP,			"KIP" },	\
++		{ SSAM_SSH_TC_EXT,			"EXT" },	\
++		{ SSAM_SSH_TC_BLD,			"BLD" },	\
++		{ SSAM_SSH_TC_BAS,			"BAS" },	\
++		{ SSAM_SSH_TC_SEN,			"SEN" },	\
++		{ SSAM_SSH_TC_SRQ,			"SRQ" },	\
++		{ SSAM_SSH_TC_MCU,			"MCU" },	\
++		{ SSAM_SSH_TC_HID,			"HID" },	\
++		{ SSAM_SSH_TC_TCH,			"TCH" },	\
++		{ SSAM_SSH_TC_BKL,			"BKL" },	\
++		{ SSAM_SSH_TC_TAM,			"TAM" },	\
++		{ SSAM_SSH_TC_ACC,			"ACC" },	\
++		{ SSAM_SSH_TC_UFI,			"UFI" },	\
++		{ SSAM_SSH_TC_USC,			"USC" },	\
++		{ SSAM_SSH_TC_PEN,			"PEN" },	\
++		{ SSAM_SSH_TC_VID,			"VID" },	\
++		{ SSAM_SSH_TC_AUD,			"AUD" },	\
++		{ SSAM_SSH_TC_SMC,			"SMC" },	\
++		{ SSAM_SSH_TC_KPD,			"KPD" },	\
++		{ SSAM_SSH_TC_REG,			"REG" }		\
++	)
++
++DECLARE_EVENT_CLASS(ssam_frame_class,
++	TP_PROTO(const struct ssh_frame *frame),
++
++	TP_ARGS(frame),
++
++	TP_STRUCT__entry(
++		__field(u8, type)
++		__field(u8, seq)
++		__field(u16, len)
++	),
++
++	TP_fast_assign(
++		__entry->type = frame->type;
++		__entry->seq = frame->seq;
++		__entry->len = get_unaligned_le16(&frame->len);
++	),
++
++	TP_printk("ty=%s, seq=%#04x, len=%u",
++		ssam_show_frame_type(__entry->type),
++		__entry->seq,
++		__entry->len
++	)
++);
++
++#define DEFINE_SSAM_FRAME_EVENT(name)				\
++	DEFINE_EVENT(ssam_frame_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_frame *frame),	\
++		TP_ARGS(frame)					\
++	)
++
++DECLARE_EVENT_CLASS(ssam_command_class,
++	TP_PROTO(const struct ssh_command *cmd, u16 len),
++
++	TP_ARGS(cmd, len),
++
++	TP_STRUCT__entry(
++		__field(u16, rqid)
++		__field(u16, len)
++		__field(u8, tc)
++		__field(u8, cid)
++		__field(u8, iid)
++	),
++
++	TP_fast_assign(
++		__entry->rqid = get_unaligned_le16(&cmd->rqid);
++		__entry->tc = cmd->tc;
++		__entry->cid = cmd->cid;
++		__entry->iid = cmd->iid;
++		__entry->len = len;
++	),
++
++	TP_printk("rqid=%#06x, tc=%s, cid=%#04x, iid=%#04x, len=%u",
++		__entry->rqid,
++		ssam_show_ssh_tc(__entry->tc),
++		__entry->cid,
++		__entry->iid,
++		__entry->len
++	)
++);
++
++#define DEFINE_SSAM_COMMAND_EVENT(name)					\
++	DEFINE_EVENT(ssam_command_class, ssam_##name,			\
++		TP_PROTO(const struct ssh_command *cmd, u16 len),	\
++		TP_ARGS(cmd, len)					\
++	)
++
++DECLARE_EVENT_CLASS(ssam_packet_class,
++	TP_PROTO(const struct ssh_packet *packet),
++
++	TP_ARGS(packet),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, state)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++		__field(u8, priority)
++		__field(u16, length)
++		__field(u16, seq)
++	),
++
++	TP_fast_assign(
++		__entry->state = READ_ONCE(packet->state);
++		ssam_trace_ptr_uid(packet, __entry->uid);
++		__entry->priority = READ_ONCE(packet->priority);
++		__entry->length = packet->data.len;
++		__entry->seq = ssam_trace_get_packet_seq(packet);
++	),
++
++	TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s",
++		__entry->uid,
++		ssam_show_packet_seq(__entry->seq),
++		ssam_show_packet_type(__entry->state),
++		__entry->priority,
++		__entry->length,
++		ssam_show_packet_state(__entry->state)
++	)
++);
++
++#define DEFINE_SSAM_PACKET_EVENT(name)				\
++	DEFINE_EVENT(ssam_packet_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_packet *packet),	\
++		TP_ARGS(packet)					\
++	)
++
++DECLARE_EVENT_CLASS(ssam_packet_status_class,
++	TP_PROTO(const struct ssh_packet *packet, int status),
++
++	TP_ARGS(packet, status),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, state)
++		__field(int, status)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++		__field(u8, priority)
++		__field(u16, length)
++		__field(u16, seq)
++	),
++
++	TP_fast_assign(
++		__entry->state = READ_ONCE(packet->state);
++		__entry->status = status;
++		ssam_trace_ptr_uid(packet, __entry->uid);
++		__entry->priority = READ_ONCE(packet->priority);
++		__entry->length = packet->data.len;
++		__entry->seq = ssam_trace_get_packet_seq(packet);
++	),
++
++	TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s, status=%d",
++		__entry->uid,
++		ssam_show_packet_seq(__entry->seq),
++		ssam_show_packet_type(__entry->state),
++		__entry->priority,
++		__entry->length,
++		ssam_show_packet_state(__entry->state),
++		__entry->status
++	)
++);
++
++#define DEFINE_SSAM_PACKET_STATUS_EVENT(name)				\
++	DEFINE_EVENT(ssam_packet_status_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_packet *packet, int status),	\
++		TP_ARGS(packet, status)					\
++	)
++
++DECLARE_EVENT_CLASS(ssam_request_class,
++	TP_PROTO(const struct ssh_request *request),
++
++	TP_ARGS(request),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, state)
++		__field(u32, rqid)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++		__field(u8, tc)
++		__field(u16, cid)
++		__field(u16, iid)
++	),
++
++	TP_fast_assign(
++		const struct ssh_packet *p = &request->packet;
++
++		/* Use packet for UID so we can match requests to packets. */
++		__entry->state = READ_ONCE(request->state);
++		__entry->rqid = ssam_trace_get_request_id(p);
++		ssam_trace_ptr_uid(p, __entry->uid);
++		__entry->tc = ssam_trace_get_request_tc(p);
++		__entry->cid = ssam_trace_get_command_field_u8(p, cid);
++		__entry->iid = ssam_trace_get_command_field_u8(p, iid);
++	),
++
++	TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
++		__entry->uid,
++		ssam_show_request_id(__entry->rqid),
++		ssam_show_request_type(__entry->state),
++		ssam_show_request_state(__entry->state),
++		ssam_show_ssh_tc(__entry->tc),
++		ssam_show_generic_u8_field(__entry->cid),
++		ssam_show_generic_u8_field(__entry->iid)
++	)
++);
++
++#define DEFINE_SSAM_REQUEST_EVENT(name)				\
++	DEFINE_EVENT(ssam_request_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_request *request),	\
++		TP_ARGS(request)				\
++	)
++
++DECLARE_EVENT_CLASS(ssam_request_status_class,
++	TP_PROTO(const struct ssh_request *request, int status),
++
++	TP_ARGS(request, status),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, state)
++		__field(u32, rqid)
++		__field(int, status)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++		__field(u8, tc)
++		__field(u16, cid)
++		__field(u16, iid)
++	),
++
++	TP_fast_assign(
++		const struct ssh_packet *p = &request->packet;
++
++		/* Use packet for UID so we can match requests to packets. */
++		__entry->state = READ_ONCE(request->state);
++		__entry->rqid = ssam_trace_get_request_id(p);
++		__entry->status = status;
++		ssam_trace_ptr_uid(p, __entry->uid);
++		__entry->tc = ssam_trace_get_request_tc(p);
++		__entry->cid = ssam_trace_get_command_field_u8(p, cid);
++		__entry->iid = ssam_trace_get_command_field_u8(p, iid);
++	),
++
++	TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
++		__entry->uid,
++		ssam_show_request_id(__entry->rqid),
++		ssam_show_request_type(__entry->state),
++		ssam_show_request_state(__entry->state),
++		ssam_show_ssh_tc(__entry->tc),
++		ssam_show_generic_u8_field(__entry->cid),
++		ssam_show_generic_u8_field(__entry->iid),
++		__entry->status
++	)
++);
++
++#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name)				\
++	DEFINE_EVENT(ssam_request_status_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_request *request, int status),\
++		TP_ARGS(request, status)				\
++	)
++
++DECLARE_EVENT_CLASS(ssam_alloc_class,
++	TP_PROTO(void *ptr, size_t len),
++
++	TP_ARGS(ptr, len),
++
++	TP_STRUCT__entry(
++		__field(size_t, len)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++	),
++
++	TP_fast_assign(
++		__entry->len = len;
++		ssam_trace_ptr_uid(ptr, __entry->uid);
++	),
++
++	TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len)
++);
++
++#define DEFINE_SSAM_ALLOC_EVENT(name)					\
++	DEFINE_EVENT(ssam_alloc_class, ssam_##name,			\
++		TP_PROTO(void *ptr, size_t len),			\
++		TP_ARGS(ptr, len)					\
++	)
++
++DECLARE_EVENT_CLASS(ssam_free_class,
++	TP_PROTO(void *ptr),
++
++	TP_ARGS(ptr),
++
++	TP_STRUCT__entry(
++		__array(char, uid, SSAM_PTR_UID_LEN)
++	),
++
++	TP_fast_assign(
++		ssam_trace_ptr_uid(ptr, __entry->uid);
++	),
++
++	TP_printk("uid=%s", __entry->uid)
++);
++
++#define DEFINE_SSAM_FREE_EVENT(name)					\
++	DEFINE_EVENT(ssam_free_class, ssam_##name,			\
++		TP_PROTO(void *ptr),					\
++		TP_ARGS(ptr)						\
++	)
++
++DECLARE_EVENT_CLASS(ssam_pending_class,
++	TP_PROTO(unsigned int pending),
++
++	TP_ARGS(pending),
++
++	TP_STRUCT__entry(
++		__field(unsigned int, pending)
++	),
++
++	TP_fast_assign(
++		__entry->pending = pending;
++	),
++
++	TP_printk("pending=%u", __entry->pending)
++);
++
++#define DEFINE_SSAM_PENDING_EVENT(name)					\
++	DEFINE_EVENT(ssam_pending_class, ssam_##name,			\
++		TP_PROTO(unsigned int pending),				\
++		TP_ARGS(pending)					\
++	)
++
++DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
++DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
++DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
++
++DEFINE_SSAM_PACKET_EVENT(packet_release);
++DEFINE_SSAM_PACKET_EVENT(packet_submit);
++DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
++DEFINE_SSAM_PACKET_EVENT(packet_timeout);
++DEFINE_SSAM_PACKET_EVENT(packet_cancel);
++DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
++DEFINE_SSAM_PENDING_EVENT(ptl_timeout_reap);
++
++DEFINE_SSAM_REQUEST_EVENT(request_submit);
++DEFINE_SSAM_REQUEST_EVENT(request_timeout);
++DEFINE_SSAM_REQUEST_EVENT(request_cancel);
++DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
++DEFINE_SSAM_PENDING_EVENT(rtl_timeout_reap);
++
++DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
++DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
++
++DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
++DEFINE_SSAM_FREE_EVENT(event_item_free);
++
++#endif /* _SURFACE_AGGREGATOR_TRACE_H */
++
++/* This part must be outside protection */
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE trace
++
++#include <trace/define_trace.h>
+-- 
+2.30.1
+
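Editor's note: because the trace points above are grouped into event classes, extending the set later is mostly declarative: a new event reuses an existing class and automatically gets a matching trace_*() helper. A hypothetical example of such an extension (the event name packet_requeue and the call site are illustrative only, not part of this patch):

/* In trace.h, next to the existing DEFINE_SSAM_PACKET_EVENT() lines: */
DEFINE_SSAM_PACKET_EVENT(packet_requeue);

/* In a .c file that already includes trace.h (e.g. ssh_packet_layer.c),
 * the generated helper can then be called at the point of interest:
 */
static void example_trace_requeue(struct ssh_packet *p)
{
	trace_ssam_packet_requeue(p);
}

At runtime these events appear under the surface_aggregator group in tracefs (matching TRACE_SYSTEM above), so they can be enabled individually or as a whole group via the usual per-event enable files.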
+From c71a61eda0fe21590831ace0887ca6714aa9d551 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:55 +0100
+Subject: [PATCH] platform/surface: aggregator: Add error injection
+ capabilities
+
+This commit adds error injection hooks to the Surface Serial Hub
+communication protocol implementation, to:
+
+ - simulate simple serial transmission errors,
+
+ - drop packets, requests, and responses, simulating communication
+   failures and potentially triggering retransmission timeouts, as well as
+
+ - inject invalid data into submitted and received packets.
+
+Together with the trace points introduced in the previous commit, these
+facilities are intended to aid in testing, validation, and debugging of
+the Surface Aggregator communication layer.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Link: https://lore.kernel.org/r/20201221183959.1186143-6-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/aggregator/Kconfig   |  14 +
+ .../surface/aggregator/ssh_packet_layer.c     | 296 +++++++++++++++++-
+ .../surface/aggregator/ssh_request_layer.c    |  35 +++
+ drivers/platform/surface/aggregator/trace.h   |  31 ++
+ 4 files changed, 375 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
+index e9f4ad96e40a..e417bac67088 100644
+--- a/drivers/platform/surface/aggregator/Kconfig
++++ b/drivers/platform/surface/aggregator/Kconfig
+@@ -40,3 +40,17 @@ menuconfig SURFACE_AGGREGATOR
+ 	  Choose m if you want to build the SAM subsystem core and SSH driver as
+ 	  module, y if you want to build it into the kernel and n if you don't
+ 	  want it at all.
++
++config SURFACE_AGGREGATOR_ERROR_INJECTION
++	bool "Surface System Aggregator Module Error Injection Capabilities"
++	depends on SURFACE_AGGREGATOR
++	depends on FUNCTION_ERROR_INJECTION
++	help
++	  Provides error-injection capabilities for the Surface System
++	  Aggregator Module subsystem and Surface Serial Hub driver.
++
++	  Specifically, exports error injection hooks to be used with the
++	  kernel's function error injection capabilities to simulate underlying
++	  transport and communication problems, such as invalid data sent to or
++	  received from the EC, dropped data, and communication timeouts.
++	  Intended for development and debugging.
+diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+index c4f082e57372..74f0faaa2b27 100644
+--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+@@ -7,6 +7,7 @@
+ 
+ #include <asm/unaligned.h>
+ #include <linux/atomic.h>
++#include <linux/error-injection.h>
+ #include <linux/jiffies.h>
+ #include <linux/kfifo.h>
+ #include <linux/kref.h>
+@@ -226,6 +227,286 @@
+  */
+ #define SSH_PTL_RX_FIFO_LEN			4096
+ 
++#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
++
++/**
++ * ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets.
++ *
++ * Useful to test detection and handling of automated re-transmits by the EC.
++ * Specifically of packets that the EC considers not-ACKed but the driver
++ * already considers ACKed (due to dropped ACK). In this case, the EC
++ * re-transmits the packet-to-be-ACKed and the driver should detect it as
++ * duplicate/already handled. Note that the driver should still send an ACK
++ * for the re-transmitted packet.
++ */
++static noinline bool ssh_ptl_should_drop_ack_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
++
++/**
++ * ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets.
++ *
++ * Useful to test/force automated (timeout-based) re-transmit by the EC.
++ * Specifically, packets that have not reached the driver completely/with valid
++ * checksums. Only useful in combination with receipt of (injected) bad data.
++ */
++static noinline bool ssh_ptl_should_drop_nak_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
++
++/**
++ * ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced
++ * data packet.
++ *
++ * Useful to test re-transmit timeout of the driver. If the data packet has not
++ * been ACKed after a certain time, the driver should re-transmit the packet up
++ * to a limited number of times defined in SSH_PTL_MAX_PACKET_TRIES.
++ */
++static noinline bool ssh_ptl_should_drop_dsq_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
++
++/**
++ * ssh_ptl_should_fail_write() - Error injection hook to make
++ * serdev_device_write() fail.
++ *
++ * Hook to simulate errors in serdev_device_write when transmitting packets.
++ */
++static noinline int ssh_ptl_should_fail_write(void)
++{
++	return 0;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
++
++/**
++ * ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid
++ * data being sent to the EC.
++ *
++ * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
++ * Causes the packet data to be actively corrupted by overwriting it with
++ * pre-defined values, such that it becomes invalid, causing the EC to respond
++ * with a NAK packet. Useful to test handling of NAK packets received by the
++ * driver.
++ */
++static noinline bool ssh_ptl_should_corrupt_tx_data(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
++
++/**
++ * ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid
++ * data being sent by the EC.
++ *
++ * Hook to simulate invalid SYN bytes, i.e. an invalid start of a message, and
++ * test handling thereof in the driver.
++ */
++static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
++
++/**
++ * ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid
++ * data being sent by the EC.
++ *
++ * Hook to simulate invalid data/checksum of the message frame and test handling
++ * thereof in the driver.
++ */
++static noinline bool ssh_ptl_should_corrupt_rx_data(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
++
++static bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
++{
++	if (likely(!ssh_ptl_should_drop_ack_packet()))
++		return false;
++
++	trace_ssam_ei_tx_drop_ack_packet(packet);
++	ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
++		 packet);
++
++	return true;
++}
++
++static bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
++{
++	if (likely(!ssh_ptl_should_drop_nak_packet()))
++		return false;
++
++	trace_ssam_ei_tx_drop_nak_packet(packet);
++	ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
++		 packet);
++
++	return true;
++}
++
++static bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
++{
++	if (likely(!ssh_ptl_should_drop_dsq_packet()))
++		return false;
++
++	trace_ssam_ei_tx_drop_dsq_packet(packet);
++	ptl_info(packet->ptl,
++		 "packet error injection: dropping sequenced data packet %p\n",
++		 packet);
++
++	return true;
++}
++
++static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
++{
++	/* Ignore packets that don't carry any data (i.e. flush). */
++	if (!packet->data.ptr || !packet->data.len)
++		return false;
++
++	switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
++	case SSH_FRAME_TYPE_ACK:
++		return __ssh_ptl_should_drop_ack_packet(packet);
++
++	case SSH_FRAME_TYPE_NAK:
++		return __ssh_ptl_should_drop_nak_packet(packet);
++
++	case SSH_FRAME_TYPE_DATA_SEQ:
++		return __ssh_ptl_should_drop_dsq_packet(packet);
++
++	default:
++		return false;
++	}
++}
++
++static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
++			     const unsigned char *buf, size_t count)
++{
++	int status;
++
++	status = ssh_ptl_should_fail_write();
++	if (unlikely(status)) {
++		trace_ssam_ei_tx_fail_write(packet, status);
++		ptl_info(packet->ptl,
++			 "packet error injection: simulating transmit error %d, packet %p\n",
++			 status, packet);
++
++		return status;
++	}
++
++	return serdev_device_write_buf(ptl->serdev, buf, count);
++}
++
++static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
++{
++	/* Ignore packets that don't carry any data (i.e. flush). */
++	if (!packet->data.ptr || !packet->data.len)
++		return;
++
++	/* Only allow sequenced data packets to be modified. */
++	if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
++		return;
++
++	if (likely(!ssh_ptl_should_corrupt_tx_data()))
++		return;
++
++	trace_ssam_ei_tx_corrupt_data(packet);
++	ptl_info(packet->ptl,
++		 "packet error injection: simulating invalid transmit data on packet %p\n",
++		 packet);
++
++	/*
++	 * NB: The value 0xb3 has been chosen more or less randomly so that it
++	 * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
++	 * non-trivial (i.e. non-zero, non-0xff).
++	 */
++	memset(packet->data.ptr, 0xb3, packet->data.len);
++}
++
++static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
++					  struct ssam_span *data)
++{
++	struct ssam_span frame;
++
++	/* Check if there actually is something to corrupt. */
++	if (!sshp_find_syn(data, &frame))
++		return;
++
++	if (likely(!ssh_ptl_should_corrupt_rx_syn()))
++		return;
++
++	trace_ssam_ei_rx_corrupt_syn(data->len);
++
++	data->ptr[1] = 0xb3;	/* Set second byte of SYN to "random" value. */
++}
++
++static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
++					   struct ssam_span *frame)
++{
++	size_t payload_len, message_len;
++	struct ssh_frame *sshf;
++
++	/* Ignore incomplete messages; they will be handled once complete. */
++	if (frame->len < SSH_MESSAGE_LENGTH(0))
++		return;
++
++	/* Ignore incomplete messages, part 2. */
++	payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
++	message_len = SSH_MESSAGE_LENGTH(payload_len);
++	if (frame->len < message_len)
++		return;
++
++	if (likely(!ssh_ptl_should_corrupt_rx_data()))
++		return;
++
++	sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
++	trace_ssam_ei_rx_corrupt_data(sshf);
++
++	/*
++	 * Flip bits in first byte of payload checksum. This is basically
++	 * equivalent to a payload/frame data error without us having to worry
++	 * about (the, arguably pretty small, probability of) accidental
++	 * checksum collisions.
++	 */
++	frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
++}
++
++#else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
++
++static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
++{
++	return false;
++}
++
++static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
++				    struct ssh_packet *packet,
++				    const unsigned char *buf,
++				    size_t count)
++{
++	return serdev_device_write_buf(ptl->serdev, buf, count);
++}
++
++static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
++{
++}
++
++static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
++						 struct ssam_span *data)
++{
++}
++
++static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
++						  struct ssam_span *frame)
++{
++}
++
++#endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
++
+ static void __ssh_ptl_packet_release(struct kref *kref)
+ {
+ 	struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
+@@ -776,6 +1057,13 @@ static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet)
+ 	if (unlikely(!packet->data.ptr))
+ 		return 0;
+ 
++	/* Error injection: drop packet to simulate transmission problem. */
++	if (ssh_ptl_should_drop_packet(packet))
++		return 0;
++
++	/* Error injection: simulate invalid packet data. */
++	ssh_ptl_tx_inject_invalid_data(packet);
++
+ 	ptl_dbg(ptl, "tx: sending data (length: %zu)\n", packet->data.len);
+ 	print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ 			     packet->data.ptr, packet->data.len, false);
+@@ -787,7 +1075,7 @@ static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet)
+ 		buf = packet->data.ptr + offset;
+ 		len = packet->data.len - offset;
+ 
+-		status = serdev_device_write_buf(ptl->serdev, buf, len);
++		status = ssh_ptl_write_buf(ptl, packet, buf, len);
+ 		if (status < 0)
+ 			return status;
+ 
+@@ -1400,6 +1688,9 @@ static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
+ 	bool syn_found;
+ 	int status;
+ 
++	/* Error injection: Modify data to simulate corrupt SYN bytes. */
++	ssh_ptl_rx_inject_invalid_syn(ptl, source);
++
+ 	/* Find SYN. */
+ 	syn_found = sshp_find_syn(source, &aligned);
+ 
+@@ -1430,6 +1721,9 @@ static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
+ 	if (unlikely(!syn_found))
+ 		return aligned.ptr - source->ptr;
+ 
++	/* Error injection: Modify data to simulate corruption. */
++	ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
++
+ 	/* Parse and validate frame. */
+ 	status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
+ 				  SSH_PTL_RX_BUF_LEN);
+diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
+index b649d71840fd..bb1c862411a2 100644
+--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
+@@ -8,6 +8,7 @@
+ #include <asm/unaligned.h>
+ #include <linux/atomic.h>
+ #include <linux/completion.h>
++#include <linux/error-injection.h>
+ #include <linux/ktime.h>
+ #include <linux/limits.h>
+ #include <linux/list.h>
+@@ -58,6 +59,30 @@
+  */
+ #define SSH_RTL_TX_BATCH		10
+ 
++#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
++
++/**
++ * ssh_rtl_should_drop_response() - Error injection hook to drop request
++ * responses.
++ *
++ * Useful to cause request transmission timeouts in the driver by dropping the
++ * response to a request.
++ */
++static noinline bool ssh_rtl_should_drop_response(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
++
++#else
++
++static inline bool ssh_rtl_should_drop_response(void)
++{
++	return false;
++}
++
++#endif
++
+ static u16 ssh_request_get_rqid(struct ssh_request *rqst)
+ {
+ 	return get_unaligned_le16(rqst->packet.data.ptr
+@@ -459,6 +484,16 @@ static void ssh_rtl_complete(struct ssh_rtl *rtl,
+ 		if (unlikely(ssh_request_get_rqid(p) != rqid))
+ 			continue;
+ 
++		/* Simulate response timeout. */
++		if (ssh_rtl_should_drop_response()) {
++			spin_unlock(&rtl->pending.lock);
++
++			trace_ssam_ei_rx_drop_response(p);
++			rtl_info(rtl, "request error injection: dropping response for request %p\n",
++				 &p->packet);
++			return;
++		}
++
+ 		/*
+ 		 * Mark as "response received" and "locked" as we're going to
+ 		 * complete it.
+diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
+index dcca8007d876..eb332bb53ae4 100644
+--- a/drivers/platform/surface/aggregator/trace.h
++++ b/drivers/platform/surface/aggregator/trace.h
+@@ -565,6 +565,28 @@ DECLARE_EVENT_CLASS(ssam_pending_class,
+ 		TP_ARGS(pending)					\
+ 	)
+ 
++DECLARE_EVENT_CLASS(ssam_data_class,
++	TP_PROTO(size_t length),
++
++	TP_ARGS(length),
++
++	TP_STRUCT__entry(
++		__field(size_t, length)
++	),
++
++	TP_fast_assign(
++		__entry->length = length;
++	),
++
++	TP_printk("length=%zu", __entry->length)
++);
++
++#define DEFINE_SSAM_DATA_EVENT(name)					\
++	DEFINE_EVENT(ssam_data_class, ssam_##name,			\
++		TP_PROTO(size_t length),				\
++		TP_ARGS(length)						\
++	)
++
+ DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
+ DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
+ DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
+@@ -583,6 +605,15 @@ DEFINE_SSAM_REQUEST_EVENT(request_cancel);
+ DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
+ DEFINE_SSAM_PENDING_EVENT(rtl_timeout_reap);
+ 
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
++DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
++DEFINE_SSAM_DATA_EVENT(ei_rx_corrupt_syn);
++DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
++DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
++
+ DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
+ DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
+ 
+-- 
+2.30.1
+
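All of the error-injection hooks added by the patch above follow the same shape: with CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION enabled, the hook is a noinline stub returning false that is annotated with ALLOW_ERROR_INJECTION(), so its return value can be overridden externally (e.g. via the kernel's function error injection facilities); with the option disabled, it degrades to an inline stub that the compiler can fold away. A minimal sketch of that pattern, using a hypothetical hook and call site rather than code taken from the patch:

    #ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION

    /* Hypothetical hook: returns false unless an error is injected. */
    static noinline bool ssh_example_should_fail(void)
    {
            return false;
    }
    ALLOW_ERROR_INJECTION(ssh_example_should_fail, TRUE);

    #else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */

    static inline bool ssh_example_should_fail(void)
    {
            return false;
    }

    #endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */

    static void ssh_example_transmit_path(void)
    {
            /* With injection disabled, this branch is statically false. */
            if (unlikely(ssh_example_should_fail()))
                    return; /* simulate the failure */

            /* ... normal processing ... */
    }
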
+From 38a8ac1671564ba6cd91fdf7b226289da6e4b86a Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:56 +0100
+Subject: [PATCH] platform/surface: aggregator: Add dedicated bus and device
+ type
+
+The Surface Aggregator EC provides varying functionality, depending on
+the Surface device. To manage this functionality, we use dedicated
+client devices for each subsystem or virtual device of the EC. While
+some of these clients are described as standard devices in ACPI and the
+corresponding client drivers can be implemented as platform drivers in
+the kernel (making use of the controller API already present), many
+devices, especially on newer Surface models, cannot be found there.
+
+To simplify management of these devices, we introduce a new bus and
+client device type for the Surface Aggregator subsystem. The new device
+type takes care of managing the controller reference, essentially
+guaranteeing its validity for as long as the client device exists, thus
+alleviating the need to manually establish device links for that purpose
+in the client driver (as has to be done with the platform devices).
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20201221183959.1186143-7-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/aggregator/Kconfig  |  12 +
+ drivers/platform/surface/aggregator/Makefile |   4 +
+ drivers/platform/surface/aggregator/bus.c    | 415 ++++++++++++++++++
+ drivers/platform/surface/aggregator/bus.h    |  27 ++
+ drivers/platform/surface/aggregator/core.c   |  12 +
+ include/linux/mod_devicetable.h              |  18 +
+ include/linux/surface_aggregator/device.h    | 423 +++++++++++++++++++
+ scripts/mod/devicetable-offsets.c            |   8 +
+ scripts/mod/file2alias.c                     |  23 +
+ 9 files changed, 942 insertions(+)
+ create mode 100644 drivers/platform/surface/aggregator/bus.c
+ create mode 100644 drivers/platform/surface/aggregator/bus.h
+ create mode 100644 include/linux/surface_aggregator/device.h
+
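From a client driver's perspective, the core of this patch is the new device-ID matching: a driver declares the SSAM devices it binds to in a table of struct ssam_device_id entries. A minimal, hedged sketch (the table name and ID values are placeholders; the SSAM_DEVICE() helper and the "ssam" MODULE_DEVICE_TABLE support come from the diffs below):

    static const struct ssam_device_id example_match_table[] = {
            /* Serial-hub domain, placeholder category, any target/instance/function. */
            { SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, 0x02,
                          SSAM_ANY_TID, SSAM_ANY_IID, SSAM_ANY_FUN) },
            { },
    };
    MODULE_DEVICE_TABLE(ssam, example_match_table);

Together with the file2alias changes further down, such a table should allow the driver to be autoloaded via module aliases of the form "ssam:dNcNtNiNfN", matching the MODALIAS uevent emitted for each client device.
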
+diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
+index e417bac67088..3aaeea9f0433 100644
+--- a/drivers/platform/surface/aggregator/Kconfig
++++ b/drivers/platform/surface/aggregator/Kconfig
+@@ -41,6 +41,18 @@ menuconfig SURFACE_AGGREGATOR
+ 	  module, y if you want to build it into the kernel and n if you don't
+ 	  want it at all.
+ 
++config SURFACE_AGGREGATOR_BUS
++	bool "Surface System Aggregator Module Bus"
++	depends on SURFACE_AGGREGATOR
++	default y
++	help
++	  Expands the Surface System Aggregator Module (SSAM) core driver by
++	  providing a dedicated bus and client-device type.
++
++	  This bus and device type are intended to provide and simplify support
++	  for non-platform and non-ACPI SSAM devices, i.e. SSAM devices that are
++	  not auto-detectable via the conventional means (e.g. ACPI).
++
+ config SURFACE_AGGREGATOR_ERROR_INJECTION
+ 	bool "Surface System Aggregator Module Error Injection Capabilities"
+ 	depends on SURFACE_AGGREGATOR
+diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
+index b8b24c8ec310..c112e2c7112b 100644
+--- a/drivers/platform/surface/aggregator/Makefile
++++ b/drivers/platform/surface/aggregator/Makefile
+@@ -11,3 +11,7 @@ surface_aggregator-objs += ssh_parser.o
+ surface_aggregator-objs += ssh_packet_layer.o
+ surface_aggregator-objs += ssh_request_layer.o
+ surface_aggregator-objs += controller.o
++
++ifeq ($(CONFIG_SURFACE_AGGREGATOR_BUS),y)
++surface_aggregator-objs += bus.o
++endif
+diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
+new file mode 100644
+index 000000000000..a9b660af0917
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/bus.c
+@@ -0,0 +1,415 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface System Aggregator Module bus and device integration.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/device.h>
++#include <linux/slab.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
++
++#include "bus.h"
++#include "controller.h"
++
++static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
++			     char *buf)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	return sysfs_emit(buf, "ssam:d%02Xc%02Xt%02Xi%02Xf%02X\n",
++			sdev->uid.domain, sdev->uid.category, sdev->uid.target,
++			sdev->uid.instance, sdev->uid.function);
++}
++static DEVICE_ATTR_RO(modalias);
++
++static struct attribute *ssam_device_attrs[] = {
++	&dev_attr_modalias.attr,
++	NULL,
++};
++ATTRIBUTE_GROUPS(ssam_device);
++
++static int ssam_device_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02Xf%02X",
++			      sdev->uid.domain, sdev->uid.category,
++			      sdev->uid.target, sdev->uid.instance,
++			      sdev->uid.function);
++}
++
++static void ssam_device_release(struct device *dev)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	ssam_controller_put(sdev->ctrl);
++	kfree(sdev);
++}
++
++const struct device_type ssam_device_type = {
++	.name    = "surface_aggregator_device",
++	.groups  = ssam_device_groups,
++	.uevent  = ssam_device_uevent,
++	.release = ssam_device_release,
++};
++EXPORT_SYMBOL_GPL(ssam_device_type);
++
++/**
++ * ssam_device_alloc() - Allocate and initialize a SSAM client device.
++ * @ctrl: The controller under which the device should be added.
++ * @uid:  The UID of the device to be added.
++ *
++ * Allocates and initializes a new client device. The parent of the device
++ * will be set to the controller device and the name will be set based on the
++ * UID. Note that the device still has to be added via ssam_device_add().
++ * Refer to that function for more details.
++ *
++ * Return: Returns the newly allocated and initialized SSAM client device, or
++ * %NULL if it could not be allocated.
++ */
++struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
++				      struct ssam_device_uid uid)
++{
++	struct ssam_device *sdev;
++
++	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
++	if (!sdev)
++		return NULL;
++
++	device_initialize(&sdev->dev);
++	sdev->dev.bus = &ssam_bus_type;
++	sdev->dev.type = &ssam_device_type;
++	sdev->dev.parent = ssam_controller_device(ctrl);
++	sdev->ctrl = ssam_controller_get(ctrl);
++	sdev->uid = uid;
++
++	dev_set_name(&sdev->dev, "%02x:%02x:%02x:%02x:%02x",
++		     sdev->uid.domain, sdev->uid.category, sdev->uid.target,
++		     sdev->uid.instance, sdev->uid.function);
++
++	return sdev;
++}
++EXPORT_SYMBOL_GPL(ssam_device_alloc);
++
++/**
++ * ssam_device_add() - Add a SSAM client device.
++ * @sdev: The SSAM client device to be added.
++ *
++ * Added client devices must be guaranteed to always have a valid and active
++ * controller. Thus, this function will fail with %-ENODEV if the controller
++ * of the device has not been initialized yet, has been suspended, or has been
++ * shut down.
++ *
++ * The caller of this function should ensure that the corresponding call to
++ * ssam_device_remove() is issued before the controller is shut down. If the
++ * added device is a direct child of the controller device (default), it will
++ * be automatically removed when the controller is shut down.
++ *
++ * By default, the controller device will become the parent of the newly
++ * created client device. The parent may be changed before ssam_device_add is
++ * called, but care must be taken that a) the correct suspend/resume ordering
++ * is guaranteed and b) the client device does not outlive the controller,
++ * i.e. that the device is removed before the controller is being shut down.
++ * In case these guarantees have to be manually enforced, please refer to the
++ * ssam_client_link() and ssam_client_bind() functions, which are intended to
++ * set up device-links for this purpose.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssam_device_add(struct ssam_device *sdev)
++{
++	int status;
++
++	/*
++	 * Ensure that we can only add new devices to a controller if it has
++	 * been started and is not going away soon. This works in combination
++	 * with ssam_controller_remove_clients to ensure driver presence for the
++	 * controller device, i.e. it ensures that the controller (sdev->ctrl)
++	 * is always valid and can be used for requests as long as the client
++	 * device we add here is registered as child under it. This essentially
++	 * guarantees that the client driver can always expect the preconditions
++	 * for functions like ssam_request_sync (controller has to be started
++	 * and is not suspended) to hold and thus does not have to check for
++	 * them.
++	 *
++	 * Note that for this to work, the controller has to be a parent device.
++	 * If it is not a direct parent, care has to be taken that the device is
++	 * removed via ssam_device_remove(), as device_unregister does not
++	 * remove child devices recursively.
++	 */
++	ssam_controller_statelock(sdev->ctrl);
++
++	if (sdev->ctrl->state != SSAM_CONTROLLER_STARTED) {
++		ssam_controller_stateunlock(sdev->ctrl);
++		return -ENODEV;
++	}
++
++	status = device_add(&sdev->dev);
++
++	ssam_controller_stateunlock(sdev->ctrl);
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_device_add);
++
++/**
++ * ssam_device_remove() - Remove a SSAM client device.
++ * @sdev: The device to remove.
++ *
++ * Removes and unregisters the provided SSAM client device.
++ */
++void ssam_device_remove(struct ssam_device *sdev)
++{
++	device_unregister(&sdev->dev);
++}
++EXPORT_SYMBOL_GPL(ssam_device_remove);
++
++/**
++ * ssam_device_id_compatible() - Check if a device ID matches a UID.
++ * @id:  The device ID as potential match.
++ * @uid: The device UID matching against.
++ *
++ * Check if the given ID is a match for the given UID, i.e. if a device with
++ * the provided UID is compatible to the given ID following the match rules
++ * described in its &ssam_device_id.match_flags member.
++ *
++ * Return: Returns %true if the given UID is compatible to the match rule
++ * described by the given ID, %false otherwise.
++ */
++static bool ssam_device_id_compatible(const struct ssam_device_id *id,
++				      struct ssam_device_uid uid)
++{
++	if (id->domain != uid.domain || id->category != uid.category)
++		return false;
++
++	if ((id->match_flags & SSAM_MATCH_TARGET) && id->target != uid.target)
++		return false;
++
++	if ((id->match_flags & SSAM_MATCH_INSTANCE) && id->instance != uid.instance)
++		return false;
++
++	if ((id->match_flags & SSAM_MATCH_FUNCTION) && id->function != uid.function)
++		return false;
++
++	return true;
++}
++
++/**
++ * ssam_device_id_is_null() - Check if a device ID is null.
++ * @id: The device ID to check.
++ *
++ * Check if a given device ID is null, i.e. all zeros. Used to check for the
++ * end of ``MODULE_DEVICE_TABLE(ssam, ...)`` or similar lists.
++ *
++ * Return: Returns %true if the given ID represents a null ID, %false
++ * otherwise.
++ */
++static bool ssam_device_id_is_null(const struct ssam_device_id *id)
++{
++	return id->match_flags == 0 &&
++		id->domain == 0 &&
++		id->category == 0 &&
++		id->target == 0 &&
++		id->instance == 0 &&
++		id->function == 0 &&
++		id->driver_data == 0;
++}
++
++/**
++ * ssam_device_id_match() - Find the matching ID table entry for the given UID.
++ * @table: The table to search in.
++ * @uid:   The UID to be matched against the individual table entries.
++ *
++ * Find the first match for the provided device UID in the provided ID table
++ * and return it. Returns %NULL if no match could be found.
++ */
++const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
++						  const struct ssam_device_uid uid)
++{
++	const struct ssam_device_id *id;
++
++	for (id = table; !ssam_device_id_is_null(id); ++id)
++		if (ssam_device_id_compatible(id, uid))
++			return id;
++
++	return NULL;
++}
++EXPORT_SYMBOL_GPL(ssam_device_id_match);
++
++/**
++ * ssam_device_get_match() - Find and return the ID matching the device in the
++ * ID table of the bound driver.
++ * @dev: The device for which to get the matching ID table entry.
++ *
++ * Find the first match for the UID of the device in the ID table of the
++ * currently bound driver and return it. Returns %NULL if the device does not
++ * have a driver bound to it, the driver does not have match_table (i.e. it is
++ * %NULL), or there is no match in the driver's match_table.
++ *
++ * This function essentially calls ssam_device_id_match() with the ID table of
++ * the bound device driver and the UID of the device.
++ *
++ * Return: Returns the first match for the UID of the device in the device
++ * driver's match table, or %NULL if no such match could be found.
++ */
++const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev)
++{
++	const struct ssam_device_driver *sdrv;
++
++	sdrv = to_ssam_device_driver(dev->dev.driver);
++	if (!sdrv)
++		return NULL;
++
++	if (!sdrv->match_table)
++		return NULL;
++
++	return ssam_device_id_match(sdrv->match_table, dev->uid);
++}
++EXPORT_SYMBOL_GPL(ssam_device_get_match);
++
++/**
++ * ssam_device_get_match_data() - Find the ID matching the device in the
++ * ID table of the bound driver and return its ``driver_data`` member.
++ * @dev: The device for which to get the match data.
++ *
++ * Find the first match for the UID of the device in the ID table of the
++ * corresponding driver and return its driver_data. Returns %NULL if the
++ * device does not have a driver bound to it, the driver does not have
++ * match_table (i.e. it is %NULL), there is no match in the driver's
++ * match_table, or the match does not have any driver_data.
++ *
++ * This function essentially calls ssam_device_get_match() and, if any match
++ * could be found, returns its ``struct ssam_device_id.driver_data`` member.
++ *
++ * Return: Returns the driver data associated with the first match for the UID
++ * of the device in the device driver's match table, or %NULL if no such match
++ * could be found.
++ */
++const void *ssam_device_get_match_data(const struct ssam_device *dev)
++{
++	const struct ssam_device_id *id;
++
++	id = ssam_device_get_match(dev);
++	if (!id)
++		return NULL;
++
++	return (const void *)id->driver_data;
++}
++EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
++
++static int ssam_bus_match(struct device *dev, struct device_driver *drv)
++{
++	struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	if (!is_ssam_device(dev))
++		return 0;
++
++	return !!ssam_device_id_match(sdrv->match_table, sdev->uid);
++}
++
++static int ssam_bus_probe(struct device *dev)
++{
++	return to_ssam_device_driver(dev->driver)
++		->probe(to_ssam_device(dev));
++}
++
++static int ssam_bus_remove(struct device *dev)
++{
++	struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
++
++	if (sdrv->remove)
++		sdrv->remove(to_ssam_device(dev));
++
++	return 0;
++}
++
++struct bus_type ssam_bus_type = {
++	.name   = "surface_aggregator",
++	.match  = ssam_bus_match,
++	.probe  = ssam_bus_probe,
++	.remove = ssam_bus_remove,
++};
++EXPORT_SYMBOL_GPL(ssam_bus_type);
++
++/**
++ * __ssam_device_driver_register() - Register a SSAM client device driver.
++ * @sdrv:  The driver to register.
++ * @owner: The module owning the provided driver.
++ *
++ * Please refer to the ssam_device_driver_register() macro for the normal way
++ * to register a driver from inside its owning module.
++ */
++int __ssam_device_driver_register(struct ssam_device_driver *sdrv,
++				  struct module *owner)
++{
++	sdrv->driver.owner = owner;
++	sdrv->driver.bus = &ssam_bus_type;
++
++	/* force drivers to async probe so I/O is possible in probe */
++	sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
++
++	return driver_register(&sdrv->driver);
++}
++EXPORT_SYMBOL_GPL(__ssam_device_driver_register);
++
++/**
++ * ssam_device_driver_unregister - Unregister a SSAM device driver.
++ * @sdrv: The driver to unregister.
++ */
++void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
++{
++	driver_unregister(&sdrv->driver);
++}
++EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
++
++static int ssam_remove_device(struct device *dev, void *_data)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	if (is_ssam_device(dev))
++		ssam_device_remove(sdev);
++
++	return 0;
++}
++
++/**
++ * ssam_controller_remove_clients() - Remove SSAM client devices registered as
++ * direct children under the given controller.
++ * @ctrl: The controller to remove all direct clients for.
++ *
++ * Remove all SSAM client devices registered as direct children under the
++ * given controller. Note that this only accounts for direct children of the
++ * controller device. This does not take care of any client devices where the
++ * parent device has been manually set before calling ssam_device_add. Refer
++ * to ssam_device_add()/ssam_device_remove() for more details on those cases.
++ *
++ * To avoid new devices being added in parallel to this call, the main
++ * controller lock (not statelock) must be held during this (and if
++ * necessary, any subsequent deinitialization) call.
++ */
++void ssam_controller_remove_clients(struct ssam_controller *ctrl)
++{
++	struct device *dev;
++
++	dev = ssam_controller_device(ctrl);
++	device_for_each_child_reverse(dev, NULL, ssam_remove_device);
++}
++
++/**
++ * ssam_bus_register() - Register and set up the SSAM client device bus.
++ */
++int ssam_bus_register(void)
++{
++	return bus_register(&ssam_bus_type);
++}
++
++/**
++ * ssam_bus_unregister() - Unregister the SSAM client device bus.
++ */
++void ssam_bus_unregister(void)
++{
++	return bus_unregister(&ssam_bus_type);
++}
+diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
+new file mode 100644
+index 000000000000..7712baaed6a5
+--- /dev/null
++++ b/drivers/platform/surface/aggregator/bus.h
+@@ -0,0 +1,27 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Surface System Aggregator Module bus and device integration.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_BUS_H
++#define _SURFACE_AGGREGATOR_BUS_H
++
++#include <linux/surface_aggregator/controller.h>
++
++#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
++
++void ssam_controller_remove_clients(struct ssam_controller *ctrl);
++
++int ssam_bus_register(void);
++void ssam_bus_unregister(void);
++
++#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++static inline void ssam_controller_remove_clients(struct ssam_controller *ctrl) {}
++static inline int ssam_bus_register(void) { return 0; }
++static inline void ssam_bus_unregister(void) {}
++
++#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
++#endif /* _SURFACE_AGGREGATOR_BUS_H */
+diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
+index b6a9dea53592..8dc2c267bcd6 100644
+--- a/drivers/platform/surface/aggregator/core.c
++++ b/drivers/platform/surface/aggregator/core.c
+@@ -22,6 +22,8 @@
+ #include <linux/sysfs.h>
+ 
+ #include <linux/surface_aggregator/controller.h>
++
++#include "bus.h"
+ #include "controller.h"
+ 
+ #define CREATE_TRACE_POINTS
+@@ -739,6 +741,9 @@ static void ssam_serial_hub_remove(struct serdev_device *serdev)
+ 	sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
+ 	ssam_controller_lock(ctrl);
+ 
++	/* Remove all client devices. */
++	ssam_controller_remove_clients(ctrl);
++
+ 	/* Act as if suspending to silence events. */
+ 	status = ssam_ctrl_notif_display_off(ctrl);
+ 	if (status) {
+@@ -791,6 +796,10 @@ static int __init ssam_core_init(void)
+ {
+ 	int status;
+ 
++	status = ssam_bus_register();
++	if (status)
++		goto err_bus;
++
+ 	status = ssh_ctrl_packet_cache_init();
+ 	if (status)
+ 		goto err_cpkg;
+@@ -810,6 +819,8 @@ static int __init ssam_core_init(void)
+ err_evitem:
+ 	ssh_ctrl_packet_cache_destroy();
+ err_cpkg:
++	ssam_bus_unregister();
++err_bus:
+ 	return status;
+ }
+ module_init(ssam_core_init);
+@@ -819,6 +830,7 @@ static void __exit ssam_core_exit(void)
+ 	serdev_device_driver_unregister(&ssam_serial_hub);
+ 	ssam_event_item_cache_destroy();
+ 	ssh_ctrl_packet_cache_destroy();
++	ssam_bus_unregister();
+ }
+ module_exit(ssam_core_exit);
+ 
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index c425290b21e2..935060955152 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -846,4 +846,22 @@ struct auxiliary_device_id {
+ 	kernel_ulong_t driver_data;
+ };
+ 
++/* Surface System Aggregator Module */
++
++#define SSAM_MATCH_TARGET	0x1
++#define SSAM_MATCH_INSTANCE	0x2
++#define SSAM_MATCH_FUNCTION	0x4
++
++struct ssam_device_id {
++	__u8 match_flags;
++
++	__u8 domain;
++	__u8 category;
++	__u8 target;
++	__u8 instance;
++	__u8 function;
++
++	kernel_ulong_t driver_data;
++};
++
+ #endif /* LINUX_MOD_DEVICETABLE_H */
+diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
+new file mode 100644
+index 000000000000..02f3e06c0a60
+--- /dev/null
++++ b/include/linux/surface_aggregator/device.h
+@@ -0,0 +1,423 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Surface System Aggregator Module (SSAM) bus and client-device subsystem.
++ *
++ * Main interface for the surface-aggregator bus, surface-aggregator client
++ * devices, and respective drivers building on top of the SSAM controller.
++ * Provides support for non-platform/non-ACPI SSAM clients via dedicated
++ * subsystem.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H
++#define _LINUX_SURFACE_AGGREGATOR_DEVICE_H
++
++#include <linux/device.h>
++#include <linux/mod_devicetable.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/controller.h>
++
++
++/* -- Surface System Aggregator Module bus. --------------------------------- */
++
++/**
++ * enum ssam_device_domain - SAM device domain.
++ * @SSAM_DOMAIN_VIRTUAL:   Virtual device.
++ * @SSAM_DOMAIN_SERIALHUB: Physical device connected via Surface Serial Hub.
++ */
++enum ssam_device_domain {
++	SSAM_DOMAIN_VIRTUAL   = 0x00,
++	SSAM_DOMAIN_SERIALHUB = 0x01,
++};
++
++/**
++ * enum ssam_virtual_tc - Target categories for the virtual SAM domain.
++ * @SSAM_VIRTUAL_TC_HUB: Device hub category.
++ */
++enum ssam_virtual_tc {
++	SSAM_VIRTUAL_TC_HUB = 0x00,
++};
++
++/**
++ * struct ssam_device_uid - Unique identifier for SSAM device.
++ * @domain:   Domain of the device.
++ * @category: Target category of the device.
++ * @target:   Target ID of the device.
++ * @instance: Instance ID of the device.
++ * @function: Sub-function of the device. This field can be used to split a
++ *            single SAM device into multiple virtual subdevices to separate
++ *            different functionality of that device and allow one driver per
++ *            such functionality.
++ */
++struct ssam_device_uid {
++	u8 domain;
++	u8 category;
++	u8 target;
++	u8 instance;
++	u8 function;
++};
++
++/*
++ * Special values for device matching.
++ *
++ * These values are intended to be used with SSAM_DEVICE(), SSAM_VDEV(), and
++ * SSAM_SDEV() exclusively. Specifically, they are used to initialize the
++ * match_flags member of the device ID structure. Do not use them directly
++ * with struct ssam_device_id or struct ssam_device_uid.
++ */
++#define SSAM_ANY_TID		0xffff
++#define SSAM_ANY_IID		0xffff
++#define SSAM_ANY_FUN		0xffff
++
++/**
++ * SSAM_DEVICE() - Initialize a &struct ssam_device_id with the given
++ * parameters.
++ * @d:   Domain of the device.
++ * @cat: Target category of the device.
++ * @tid: Target ID of the device.
++ * @iid: Instance ID of the device.
++ * @fun: Sub-function of the device.
++ *
++ * Initializes a &struct ssam_device_id with the given parameters. See &struct
++ * ssam_device_uid for details regarding the parameters. The special values
++ * %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be used to specify that
++ * matching should ignore target ID, instance ID, and/or sub-function,
++ * respectively. This macro initializes the ``match_flags`` field based on the
++ * given parameters.
++ *
++ * Note: The parameters @d and @cat must be valid &u8 values, the parameters
++ * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
++ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
++ * allowed.
++ */
++#define SSAM_DEVICE(d, cat, tid, iid, fun)					\
++	.match_flags = (((tid) != SSAM_ANY_TID) ? SSAM_MATCH_TARGET : 0)	\
++		     | (((iid) != SSAM_ANY_IID) ? SSAM_MATCH_INSTANCE : 0)	\
++		     | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0),	\
++	.domain   = d,								\
++	.category = cat,							\
++	.target   = ((tid) != SSAM_ANY_TID) ? (tid) : 0,			\
++	.instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0,			\
++	.function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0				\
++
++/**
++ * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
++ * the given parameters.
++ * @cat: Target category of the device.
++ * @tid: Target ID of the device.
++ * @iid: Instance ID of the device.
++ * @fun: Sub-function of the device.
++ *
++ * Initializes a &struct ssam_device_id with the given parameters in the
++ * virtual domain. See &struct ssam_device_uid for details regarding the
++ * parameters. The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and
++ * %SSAM_ANY_FUN can be used to specify that matching should ignore target ID,
++ * instance ID, and/or sub-function, respectively. This macro initializes the
++ * ``match_flags`` field based on the given parameters.
++ *
++ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
++ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
++ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
++ * allowed.
++ */
++#define SSAM_VDEV(cat, tid, iid, fun) \
++	SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, tid, iid, fun)
++
++/**
++ * SSAM_SDEV() - Initialize a &struct ssam_device_id as physical SSH device
++ * with the given parameters.
++ * @cat: Target category of the device.
++ * @tid: Target ID of the device.
++ * @iid: Instance ID of the device.
++ * @fun: Sub-function of the device.
++ *
++ * Initializes a &struct ssam_device_id with the given parameters in the SSH
++ * domain. See &struct ssam_device_uid for details regarding the parameters.
++ * The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be
++ * used to specify that matching should ignore target ID, instance ID, and/or
++ * sub-function, respectively. This macro initializes the ``match_flags``
++ * field based on the given parameters.
++ *
++ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
++ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
++ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
++ * allowed.
++ */
++#define SSAM_SDEV(cat, tid, iid, fun) \
++	SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun)
++
++/**
++ * struct ssam_device - SSAM client device.
++ * @dev:  Driver model representation of the device.
++ * @ctrl: SSAM controller managing this device.
++ * @uid:  UID identifying the device.
++ */
++struct ssam_device {
++	struct device dev;
++	struct ssam_controller *ctrl;
++
++	struct ssam_device_uid uid;
++};
++
++/**
++ * struct ssam_device_driver - SSAM client device driver.
++ * @driver:      Base driver model structure.
++ * @match_table: Match table specifying which devices the driver should bind to.
++ * @probe:       Called when the driver is being bound to a device.
++ * @remove:      Called when the driver is being unbound from the device.
++ */
++struct ssam_device_driver {
++	struct device_driver driver;
++
++	const struct ssam_device_id *match_table;
++
++	int  (*probe)(struct ssam_device *sdev);
++	void (*remove)(struct ssam_device *sdev);
++};
++
++extern struct bus_type ssam_bus_type;
++extern const struct device_type ssam_device_type;
++
++/**
++ * is_ssam_device() - Check if the given device is a SSAM client device.
++ * @d: The device to test the type of.
++ *
++ * Return: Returns %true if the specified device is of type &struct
++ * ssam_device, i.e. the device type points to %ssam_device_type, and %false
++ * otherwise.
++ */
++static inline bool is_ssam_device(struct device *d)
++{
++	return d->type == &ssam_device_type;
++}
++
++/**
++ * to_ssam_device() - Casts the given device to a SSAM client device.
++ * @d: The device to cast.
++ *
++ * Casts the given &struct device to a &struct ssam_device. The caller has to
++ * ensure that the given device is actually enclosed in a &struct ssam_device,
++ * e.g. by calling is_ssam_device().
++ *
++ * Return: Returns a pointer to the &struct ssam_device wrapping the given
++ * device @d.
++ */
++static inline struct ssam_device *to_ssam_device(struct device *d)
++{
++	return container_of(d, struct ssam_device, dev);
++}
++
++/**
++ * to_ssam_device_driver() - Casts the given device driver to a SSAM client
++ * device driver.
++ * @d: The driver to cast.
++ *
++ * Casts the given &struct device_driver to a &struct ssam_device_driver. The
++ * caller has to ensure that the given driver is actually enclosed in a
++ * &struct ssam_device_driver.
++ *
++ * Return: Returns the pointer to the &struct ssam_device_driver wrapping the
++ * given device driver @d.
++ */
++static inline
++struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d)
++{
++	return container_of(d, struct ssam_device_driver, driver);
++}
++
++const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
++						  const struct ssam_device_uid uid);
++
++const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev);
++
++const void *ssam_device_get_match_data(const struct ssam_device *dev);
++
++struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
++				      struct ssam_device_uid uid);
++
++int ssam_device_add(struct ssam_device *sdev);
++void ssam_device_remove(struct ssam_device *sdev);
++
++/**
++ * ssam_device_get() - Increment reference count of SSAM client device.
++ * @sdev: The device to increment the reference count of.
++ *
++ * Increments the reference count of the given SSAM client device by
++ * incrementing the reference count of the enclosed &struct device via
++ * get_device().
++ *
++ * See ssam_device_put() for the counter-part of this function.
++ *
++ * Return: Returns the device provided as input.
++ */
++static inline struct ssam_device *ssam_device_get(struct ssam_device *sdev)
++{
++	return sdev ? to_ssam_device(get_device(&sdev->dev)) : NULL;
++}
++
++/**
++ * ssam_device_put() - Decrement reference count of SSAM client device.
++ * @sdev: The device to decrement the reference count of.
++ *
++ * Decrements the reference count of the given SSAM client device by
++ * decrementing the reference count of the enclosed &struct device via
++ * put_device().
++ *
++ * See ssam_device_get() for the counter-part of this function.
++ */
++static inline void ssam_device_put(struct ssam_device *sdev)
++{
++	if (sdev)
++		put_device(&sdev->dev);
++}
++
++/**
++ * ssam_device_get_drvdata() - Get driver-data of SSAM client device.
++ * @sdev: The device to get the driver-data from.
++ *
++ * Return: Returns the driver-data of the given device, previously set via
++ * ssam_device_set_drvdata().
++ */
++static inline void *ssam_device_get_drvdata(struct ssam_device *sdev)
++{
++	return dev_get_drvdata(&sdev->dev);
++}
++
++/**
++ * ssam_device_set_drvdata() - Set driver-data of SSAM client device.
++ * @sdev: The device to set the driver-data of.
++ * @data: The data to set the device's driver-data pointer to.
++ */
++static inline void ssam_device_set_drvdata(struct ssam_device *sdev, void *data)
++{
++	dev_set_drvdata(&sdev->dev, data);
++}
++
++int __ssam_device_driver_register(struct ssam_device_driver *d, struct module *o);
++void ssam_device_driver_unregister(struct ssam_device_driver *d);
++
++/**
++ * ssam_device_driver_register() - Register a SSAM client device driver.
++ * @drv: The driver to register.
++ */
++#define ssam_device_driver_register(drv) \
++	__ssam_device_driver_register(drv, THIS_MODULE)
++
++/**
++ * module_ssam_device_driver() - Helper macro for SSAM device driver
++ * registration.
++ * @drv: The driver managed by this module.
++ *
++ * Helper macro to register a SSAM device driver via module_init() and
++ * module_exit(). This macro may only be used once per module and replaces the
++ * aforementioned definitions.
++ */
++#define module_ssam_device_driver(drv)			\
++	module_driver(drv, ssam_device_driver_register,	\
++		      ssam_device_driver_unregister)
++
++
++/* -- Helpers for client-device requests. ----------------------------------- */
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_CL_N() - Define synchronous client-device SAM
++ * request function with neither argument nor return value.
++ * @name: Name of the generated function.
++ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request having neither argument nor return value. Device
++ * specifying parameters are not hard-coded, but instead are provided via the
++ * client device, specifically its UID, supplied when calling this function.
++ * The generated function takes care of setting up the request struct, buffer
++ * allocation, as well as execution of the request itself, returning once the
++ * request has been fully completed. The required transport buffer will be
++ * allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_device *sdev)``,
++ * returning the status of the request, which is zero on success and negative
++ * on failure. The ``sdev`` parameter specifies both the target device of the
++ * request and by association the controller via which the request is sent.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...)			\
++	SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec)		\
++	int name(struct ssam_device *sdev)				\
++	{								\
++		return __raw_##name(sdev->ctrl, sdev->uid.target,	\
++				    sdev->uid.instance);		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_CL_W() - Define synchronous client-device SAM
++ * request function with argument.
++ * @name:  Name of the generated function.
++ * @atype: Type of the request's argument.
++ * @spec:  Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking an argument of type @atype and having no
++ * return value. Device specifying parameters are not hard-coded, but instead
++ * are provided via the client device, specifically its UID, supplied when
++ * calling this function. The generated function takes care of setting up the
++ * request struct, buffer allocation, as well as execution of the request
++ * itself, returning once the request has been fully completed. The required
++ * transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_device *sdev,
++ * const atype *arg)``, returning the status of the request, which is zero on
++ * success and negative on failure. The ``sdev`` parameter specifies both the
++ * target device of the request and by association the controller via which
++ * the request is sent. The request's argument is specified via the ``arg``
++ * pointer.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...)		\
++	SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec)	\
++	int name(struct ssam_device *sdev, const atype *arg)		\
++	{								\
++		return __raw_##name(sdev->ctrl, sdev->uid.target,	\
++				    sdev->uid.instance, arg);		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_CL_R() - Define synchronous client-device SAM
++ * request function with return value.
++ * @name:  Name of the generated function.
++ * @rtype: Type of the request's return value.
++ * @spec:  Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking no argument but having a return value of
++ * type @rtype. Device specifying parameters are not hard-coded, but instead
++ * are provided via the client device, specifically its UID, supplied when
++ * calling this function. The generated function takes care of setting up the
++ * request struct, buffer allocation, as well as execution of the request
++ * itself, returning once the request has been fully completed. The required
++ * transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_device *sdev,
++ * rtype *ret)``, returning the status of the request, which is zero on
++ * success and negative on failure. The ``sdev`` parameter specifies both the
++ * target device of the request and by association the controller via which
++ * the request is sent. The request's return value is written to the memory
++ * pointed to by the ``ret`` parameter.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...)		\
++	SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec)	\
++	int name(struct ssam_device *sdev, rtype *ret)			\
++	{								\
++		return __raw_##name(sdev->ctrl, sdev->uid.target,	\
++				    sdev->uid.instance, ret);		\
++	}
++
++#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */
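As a hedged sketch of how the SSAM_DEFINE_SYNC_REQUEST_CL_R() helper above is meant to be used (the function names, target category, and command ID are placeholders, and the request-spec fields shown are assumed from the controller API rather than defined in this patch):

    SSAM_DEFINE_SYNC_REQUEST_CL_R(example_get_value, __le32, {
            .target_category = SSAM_SSH_TC_SAM,     /* placeholder category */
            .command_id      = 0x02,                /* placeholder command */
            .flags           = SSAM_REQUEST_HAS_RESPONSE,
    });

    static int example_read_value(struct ssam_device *sdev, u32 *value)
    {
            __le32 value_le;
            int status;

            status = example_get_value(sdev, &value_le);
            if (status)
                    return status;

            *value = le32_to_cpu(value_le);
            return 0;
    }

The explicit little-endian conversion mirrors the guidance in the documentation patch below: request and response payload data is exchanged with the EC in little-endian format.
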
+diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
+index e377f52dbfa3..f078eeb0a961 100644
+--- a/scripts/mod/devicetable-offsets.c
++++ b/scripts/mod/devicetable-offsets.c
+@@ -246,5 +246,13 @@ int main(void)
+ 	DEVID(auxiliary_device_id);
+ 	DEVID_FIELD(auxiliary_device_id, name);
+ 
++	DEVID(ssam_device_id);
++	DEVID_FIELD(ssam_device_id, match_flags);
++	DEVID_FIELD(ssam_device_id, domain);
++	DEVID_FIELD(ssam_device_id, category);
++	DEVID_FIELD(ssam_device_id, target);
++	DEVID_FIELD(ssam_device_id, instance);
++	DEVID_FIELD(ssam_device_id, function);
++
+ 	return 0;
+ }
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index fb4827027536..d21d2871387b 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1375,6 +1375,28 @@ static int do_auxiliary_entry(const char *filename, void *symval, char *alias)
+ 	return 1;
+ }
+ 
++/*
++ * Looks like: ssam:dNcNtNiNfN
++ *
++ * N is exactly 2 digits, where each is an upper-case hex digit.
++ */
++static int do_ssam_entry(const char *filename, void *symval, char *alias)
++{
++	DEF_FIELD(symval, ssam_device_id, match_flags);
++	DEF_FIELD(symval, ssam_device_id, domain);
++	DEF_FIELD(symval, ssam_device_id, category);
++	DEF_FIELD(symval, ssam_device_id, target);
++	DEF_FIELD(symval, ssam_device_id, instance);
++	DEF_FIELD(symval, ssam_device_id, function);
++
++	sprintf(alias, "ssam:d%02Xc%02X", domain, category);
++	ADD(alias, "t", match_flags & SSAM_MATCH_TARGET, target);
++	ADD(alias, "i", match_flags & SSAM_MATCH_INSTANCE, instance);
++	ADD(alias, "f", match_flags & SSAM_MATCH_FUNCTION, function);
++
++	return 1;
++}
++
+ /* Does namelen bytes of name exactly match the symbol? */
+ static bool sym_is(const char *name, unsigned namelen, const char *symbol)
+ {
+@@ -1450,6 +1472,7 @@ static const struct devtable devtable[] = {
+ 	{"wmi", SIZE_wmi_device_id, do_wmi_entry},
+ 	{"mhi", SIZE_mhi_device_id, do_mhi_entry},
+ 	{"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry},
++	{"ssam", SIZE_ssam_device_id, do_ssam_entry},
+ };
+ 
+ /* Create MODULE_ALIAS() statements.
+-- 
+2.30.1
+
+From cef2a8715a849b4416e2c3e8d0ef4a799a9f0c4b Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:57 +0100
+Subject: [PATCH] docs: driver-api: Add Surface Aggregator subsystem
+ documentation
+
+Add documentation for the Surface Aggregator subsystem and its client
+drivers, giving an overview of the subsystem, its use-cases, its
+internal structure and internal API, as well as its external API for
+writing client drivers.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20201221183959.1186143-8-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ Documentation/driver-api/index.rst            |   1 +
+ .../surface_aggregator/client-api.rst         |  38 ++
+ .../driver-api/surface_aggregator/client.rst  | 393 ++++++++++++
+ .../surface_aggregator/clients/index.rst      |  10 +
+ .../driver-api/surface_aggregator/index.rst   |  21 +
+ .../surface_aggregator/internal-api.rst       |  67 ++
+ .../surface_aggregator/internal.rst           | 577 ++++++++++++++++++
+ .../surface_aggregator/overview.rst           |  77 +++
+ .../driver-api/surface_aggregator/ssh.rst     | 344 +++++++++++
+ MAINTAINERS                                   |   1 +
+ 10 files changed, 1529 insertions(+)
+ create mode 100644 Documentation/driver-api/surface_aggregator/client-api.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/client.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/clients/index.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/index.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/internal-api.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/internal.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/overview.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/ssh.rst
+
+diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
+index 2456d0a97ed8..9d9af54d68c5 100644
+--- a/Documentation/driver-api/index.rst
++++ b/Documentation/driver-api/index.rst
+@@ -99,6 +99,7 @@ available subsections can be seen below.
+    rfkill
+    serial/index
+    sm501
++   surface_aggregator/index
+    switchtec
+    sync_file
+    vfio-mediated-device
+diff --git a/Documentation/driver-api/surface_aggregator/client-api.rst b/Documentation/driver-api/surface_aggregator/client-api.rst
+new file mode 100644
+index 000000000000..8e0b000d0e64
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/client-api.rst
+@@ -0,0 +1,38 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++===============================
++Client Driver API Documentation
++===============================
++
++.. contents::
++    :depth: 2
++
++
++Serial Hub Communication
++========================
++
++.. kernel-doc:: include/linux/surface_aggregator/serial_hub.h
++
++.. kernel-doc:: drivers/platform/surface/aggregator/ssh_packet_layer.c
++    :export:
++
++
++Controller and Core Interface
++=============================
++
++.. kernel-doc:: include/linux/surface_aggregator/controller.h
++
++.. kernel-doc:: drivers/platform/surface/aggregator/controller.c
++    :export:
++
++.. kernel-doc:: drivers/platform/surface/aggregator/core.c
++    :export:
++
++
++Client Bus and Client Device API
++================================
++
++.. kernel-doc:: include/linux/surface_aggregator/device.h
++
++.. kernel-doc:: drivers/platform/surface/aggregator/bus.c
++    :export:
+diff --git a/Documentation/driver-api/surface_aggregator/client.rst b/Documentation/driver-api/surface_aggregator/client.rst
+new file mode 100644
+index 000000000000..26d13085a117
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/client.rst
+@@ -0,0 +1,393 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |ssam_controller| replace:: :c:type:`struct ssam_controller <ssam_controller>`
++.. |ssam_device| replace:: :c:type:`struct ssam_device <ssam_device>`
++.. |ssam_device_driver| replace:: :c:type:`struct ssam_device_driver <ssam_device_driver>`
++.. |ssam_client_bind| replace:: :c:func:`ssam_client_bind`
++.. |ssam_client_link| replace:: :c:func:`ssam_client_link`
++.. |ssam_get_controller| replace:: :c:func:`ssam_get_controller`
++.. |ssam_controller_get| replace:: :c:func:`ssam_controller_get`
++.. |ssam_controller_put| replace:: :c:func:`ssam_controller_put`
++.. |ssam_device_alloc| replace:: :c:func:`ssam_device_alloc`
++.. |ssam_device_add| replace:: :c:func:`ssam_device_add`
++.. |ssam_device_remove| replace:: :c:func:`ssam_device_remove`
++.. |ssam_device_driver_register| replace:: :c:func:`ssam_device_driver_register`
++.. |ssam_device_driver_unregister| replace:: :c:func:`ssam_device_driver_unregister`
++.. |module_ssam_device_driver| replace:: :c:func:`module_ssam_device_driver`
++.. |SSAM_DEVICE| replace:: :c:func:`SSAM_DEVICE`
++.. |ssam_notifier_register| replace:: :c:func:`ssam_notifier_register`
++.. |ssam_notifier_unregister| replace:: :c:func:`ssam_notifier_unregister`
++.. |ssam_request_sync| replace:: :c:func:`ssam_request_sync`
++.. |ssam_event_mask| replace:: :c:type:`enum ssam_event_mask <ssam_event_mask>`
++
++
++======================
++Writing Client Drivers
++======================
++
++For the API documentation, refer to:
++
++.. toctree::
++   :maxdepth: 2
++
++   client-api
++
++
++Overview
++========
++
++Client drivers can be set up in two main ways, depending on how the
++corresponding device is made available to the system. We specifically
++differentiate between devices that are presented to the system via one of
++the conventional ways, e.g. as platform devices via ACPI, and devices that
++are non-discoverable and instead need to be explicitly provided by some
++other mechanism, as discussed further below.
++
++
++Non-SSAM Client Drivers
++=======================
++
++All communication with the SAM EC is handled via the |ssam_controller|
++representing that EC to the kernel. Drivers targeting a non-SSAM device (and
++thus not being a |ssam_device_driver|) need to explicitly establish a
++connection/relation to that controller. This can be done via the
++|ssam_client_bind| function. Said function returns a reference to the SSAM
++controller, but, more importantly, also establishes a device link between
++client device and controller (this can also be done separately via
++|ssam_client_link|). Doing this is important as it, first, guarantees that
++the returned controller remains valid for use in the client driver for as
++long as this driver is bound to its device, i.e. that the driver gets
++unbound before the controller ever becomes invalid, and, second, ensures
++correct suspend/resume ordering. This setup should be done in the
++driver's probe function, and may be used to defer probing in case the SSAM
++subsystem is not ready yet, for example:
++
++.. code-block:: c
++
++   static int client_driver_probe(struct platform_device *pdev)
++   {
++           struct ssam_controller *ctrl;
++
++           ctrl = ssam_client_bind(&pdev->dev);
++           if (IS_ERR(ctrl))
++                   return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
++
++           // ...
++
++           return 0;
++   }
++
++The controller may be separately obtained via |ssam_get_controller| and its
++lifetime be guaranteed via |ssam_controller_get| and |ssam_controller_put|.
++Note that none of these functions, however, guarantee that the controller
++will not be shut down or suspended. These functions essentially only operate
++on the reference, i.e. only guarantee a bare minimum of accessibility
++without any guarantees at all on practical operability.
++
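For illustration, a hedged sketch of holding such a bare controller reference across a longer-lived context (the struct and function names are hypothetical); note again that the reference alone does not keep the controller operational:

    struct example_ctx {
            struct ssam_controller *ctrl;
    };

    static void example_ctx_init(struct example_ctx *ctx,
                                 struct ssam_controller *ctrl)
    {
            /* Takes an extra reference; does not keep the EC operational. */
            ctx->ctrl = ssam_controller_get(ctrl);
    }

    static void example_ctx_destroy(struct example_ctx *ctx)
    {
            ssam_controller_put(ctx->ctrl);
    }
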
++
++Adding SSAM Devices
++===================
++
++If a device does not already exist/is not already provided via conventional
++means, it should be provided as |ssam_device| via the SSAM client device
++hub. New devices can be added to this hub by entering their UID into the
++corresponding registry. SSAM devices can also be manually allocated via
++|ssam_device_alloc|, subsequently to which they have to be added via
++|ssam_device_add| and eventually removed via |ssam_device_remove|. By
++default, the parent of the device is set to the controller device provided
++for allocation; however, this may be changed before the device is added. Note
++that, when changing the parent device, care must be taken to ensure that the
++controller lifetime and suspend/resume ordering guarantees, in the default
++setup provided through the parent-child relation, are preserved. If
++necessary, this can be done by use of |ssam_client_link|, as is done for
++non-SSAM client drivers and described in more detail above.
++
++A client device must always be removed by the party which added the
++respective device before the controller shuts down. Such removal can be
++guaranteed by linking the driver providing the SSAM device to the controller
++via |ssam_client_link|, causing it to unbind before the controller driver
++unbinds. Client devices registered with the controller as parent are
++automatically removed when the controller shuts down, but this should not be
++relied upon, especially as this does not extend to client devices with a
++different parent.
++
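A hedged sketch of the allocation and registration sequence described above (the wrapper function and all UID values are placeholders):

    static int example_register_device(struct ssam_controller *ctrl)
    {
            struct ssam_device_uid uid = {
                    .domain   = SSAM_DOMAIN_SERIALHUB,
                    .category = 0x02,       /* placeholder category */
                    .target   = 0x01,       /* placeholder target */
                    .instance = 0x00,
                    .function = 0x00,
            };
            struct ssam_device *sdev;
            int status;

            sdev = ssam_device_alloc(ctrl, uid);
            if (!sdev)
                    return -ENOMEM;

            status = ssam_device_add(sdev);
            if (status) {
                    /* Drops the reference taken in ssam_device_alloc(). */
                    ssam_device_put(sdev);
                    return status;
            }

            /* The device is later torn down via ssam_device_remove(sdev). */
            return 0;
    }
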
++
++SSAM Client Drivers
++===================
++
++SSAM client device drivers are, in essence, no different than other device
++driver types. They are represented via |ssam_device_driver| and bind to a
++|ssam_device| via its UID (:c:type:`struct ssam_device.uid <ssam_device>`)
++member and the match table
++(:c:type:`struct ssam_device_driver.match_table <ssam_device_driver>`),
++which should be set when declaring the driver struct instance. Refer to the
++|SSAM_DEVICE| macro documentation for more details on how to define members
++of the driver's match table.
++
++The UID for SSAM client devices consists of a ``domain``, a ``category``,
++a ``target``, an ``instance``, and a ``function``. The ``domain`` is used to
++differentiate between physical SAM devices
++(:c:type:`SSAM_DOMAIN_SERIALHUB <ssam_device_domain>`), i.e. devices that can
++be accessed via the Surface Serial Hub, and virtual ones
++(:c:type:`SSAM_DOMAIN_VIRTUAL <ssam_device_domain>`), such as client-device
++hubs, that have no real representation on the SAM EC and are solely used on
++the kernel/driver-side. For physical devices, ``category`` represents the
++target category, ``target`` the target ID, and ``instance`` the instance ID
++used to access the physical SAM device. In addition, ``function`` references
++a specific device functionality, but has no meaning to the SAM EC. The
++(default) name of a client device is generated based on its UID.
++
++A driver instance can be registered via |ssam_device_driver_register| and
++unregistered via |ssam_device_driver_unregister|. For convenience, the
++|module_ssam_device_driver| macro may be used to define module init- and
++exit-functions registering the driver.
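++
++Putting these pieces together, a skeleton of a client driver might look
++roughly like the sketch below. All names are placeholders, the match table
++entries are to be generated via the |SSAM_DEVICE| macro family described
++above, and the exact callback signatures should be checked against the
++|ssam_device_driver| documentation:
++
++.. code-block:: c
++
++   static int example_probe(struct ssam_device *sdev)
++   {
++           /* Set up driver-specific state for this device here. */
++           return 0;
++   }
++
++   static void example_remove(struct ssam_device *sdev)
++   {
++           /* Tear down driver-specific state here. */
++   }
++
++   static const struct ssam_device_id example_match[] = {
++           /* Entries generated via the SSAM_DEVICE() macro family. */
++           { },
++   };
++
++   static struct ssam_device_driver example_driver = {
++           .probe = example_probe,
++           .remove = example_remove,
++           .match_table = example_match,
++           .driver = {
++                   .name = "example_ssam_driver",
++           },
++   };
++   module_ssam_device_driver(example_driver);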
++
++The controller associated with a SSAM client device can be found in its
++:c:type:`struct ssam_device.ctrl <ssam_device>` member. This reference is
++guaranteed to be valid for at least as long as the client driver is bound,
++but should also be valid for as long as the client device exists. Note,
++however, that access outside of the bound client driver must ensure that the
++controller device is not suspended while making any requests or
++(un-)registering event notifiers (and thus should generally be avoided). This
++is guaranteed when the controller is accessed from inside the bound client
++driver.
++
++
++Making Synchronous Requests
++===========================
++
++Synchronous requests are (currently) the main form of host-initiated
++communication with the EC. There are a couple of ways to define and execute
++such requests, however, most of them boil down to something similar as shown
++in the example below. This example defines a write-read request, meaning
++that the caller provides an argument to the SAM EC and receives a response.
++The caller needs to know the (maximum) length of the response payload and
++provide a buffer for it.
++
++Care must be taken to ensure that any command payload data passed to the SAM
++EC is provided in little-endian format and, similarly, any response payload
++data received from it is converted from little-endian to host endianness.
++
++.. code-block:: c
++
++   int perform_request(struct ssam_controller *ctrl, u32 arg, u32 *ret)
++   {
++           struct ssam_request rqst;
++           struct ssam_response resp;
++           int status;
++
++           /* Convert request argument to little-endian. */
++           __le32 arg_le = cpu_to_le32(arg);
++           __le32 ret_le = cpu_to_le32(0);
++
++           /*
++            * Initialize request specification. Replace this with your values.
++            * The rqst.payload field may be NULL if rqst.length is zero,
++            * indicating that the request does not have any argument.
++            *
++            * Note: The request parameters used here are not valid, i.e.
++            *       they do not correspond to an actual SAM/EC request.
++            */
++           rqst.target_category = SSAM_SSH_TC_SAM;
++           rqst.target_id = 0x01;
++           rqst.command_id = 0x02;
++           rqst.instance_id = 0x03;
++           rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++           rqst.length = sizeof(arg_le);
++           rqst.payload = (u8 *)&arg_le;
++
++           /* Initialize request response. */
++           resp.capacity = sizeof(ret_le);
++           resp.length = 0;
++           resp.pointer = (u8 *)&ret_le;
++
++           /*
++            * Perform actual request. The response pointer may be null in case
++            * the request does not have any response. This must be consistent
++            * with the SSAM_REQUEST_HAS_RESPONSE flag set in the specification
++            * above.
++            */
++           status = ssam_request_sync(ctrl, &rqst, &resp);
++
++           /*
++            * Alternatively use
++            *
++            *   ssam_request_sync_onstack(ctrl, &rqst, &resp, sizeof(arg_le));
++            *
++            * to perform the request, allocating the message buffer directly
++            * on the stack as opposed to allocation via kzalloc().
++            */
++
++           /*
++            * Convert request response back to native format. Note that in the
++            * error case, this value is not touched by the SSAM core, i.e.
++            * 'ret_le' will be zero as specified in its initialization.
++            */
++           *ret = le32_to_cpu(ret_le);
++
++           return status;
++   }
++
++Note that |ssam_request_sync| in its essence is a wrapper over lower-level
++request primitives, which may also be used to perform requests. Refer to its
++implementation and documentation for more details.
++
++An arguably more user-friendly way of defining such functions is by using
++one of the generator macros, for example via:
++
++.. code-block:: c
++
++   SSAM_DEFINE_SYNC_REQUEST_W(__ssam_tmp_perf_mode_set, __le32, {
++           .target_category = SSAM_SSH_TC_TMP,
++           .target_id       = 0x01,
++           .command_id      = 0x03,
++           .instance_id     = 0x00,
++   });
++
++This example defines a function
++
++.. code-block:: c
++
++   int __ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, const __le32 *arg);
++
++executing the specified request, with the controller passed in when calling
++said function. In this example, the argument is provided via the ``arg``
++pointer. Note that the generated function allocates the message buffer on
++the stack. Thus, if the argument provided via the request is large, these
++kinds of macros should be avoided. Also note that, in contrast to the
++previous non-macro example, this function does not do any endianness
++conversion, which has to be handled by the caller. Apart from those
++differences the function generated by the macro is similar to the one
++provided in the non-macro example above.
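++
++For instance, a caller might wrap the function generated above as follows,
++performing the required endianness conversion itself (the wrapper name and
++the semantics of the value are, of course, only illustrative):
++
++.. code-block:: c
++
++   int set_perf_mode(struct ssam_controller *ctrl, u32 mode)
++   {
++           /* Convert the argument to little-endian before passing it on. */
++           __le32 mode_le = cpu_to_le32(mode);
++
++           return __ssam_tmp_perf_mode_set(ctrl, &mode_le);
++   }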
++
++The full list of such function-generating macros is:
++
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_N` for requests without return value and
++  without argument.
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_R` for requests with return value but no
++  argument.
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_W` for requests without return value but
++  with argument.
++
++Refer to their respective documentation for more details. For each one of
++these macros, a special variant is provided, which targets request types
++applicable to multiple instances of the same device type:
++
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_N`
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_R`
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_W`
++
++These macros differ from the previously mentioned versions in that the
++device target and instance IDs are not fixed for the generated function,
++but instead have to be provided by the caller of said function.
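++
++For example, a definition via :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_W` with a
++``__le32`` argument type is expected to yield a function with a signature
++roughly along the lines of
++
++.. code-block:: c
++
++   int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const __le32 *arg);
++
++where ``tid`` and ``iid`` denote the target and instance ID to be used for
++the request. Refer to the respective macro documentation for the exact
++signature.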
++
++Additionally, variants for direct use with client devices, i.e.
++|ssam_device|, are also provided. These can, for example, be used as
++follows:
++
++.. code-block:: c
++
++   SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
++           .target_category = SSAM_SSH_TC_BAT,
++           .command_id      = 0x01,
++   });
++
++This invocation of the macro defines a function
++
++.. code-block:: c
++
++   int ssam_bat_get_sta(struct ssam_device *sdev, __le32 *ret);
++
++executing the specified request, using the device IDs and controller given
++in the client device. The full list of such macros for client devices is:
++
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_N`
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_R`
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_W`
++
++
++Handling Events
++===============
++
++To receive events from the SAM EC, an event notifier must be registered for
++the desired event via |ssam_notifier_register|. The notifier must be
++unregistered via |ssam_notifier_unregister| once it is not required any
++more.
++
++Event notifiers are registered by providing (at minimum) a callback to call
++in case an event has been received, the registry specifying how the event
++should be enabled, an event ID specifying the target category and,
++optionally and depending on the registry used, the instance ID for which
++events should be enabled, and, finally, flags describing how the EC will
++send these events. If the specific registry does not enable events by
++instance ID, the instance ID must be set to zero. Additionally, a priority
++for the respective notifier may be specified, which determines its order in
++relation to any other notifier registered for the same target category.
++
++By default, event notifiers will receive all events for the specific target
++category, regardless of the instance ID specified when registering the
++notifier. The core may be instructed to only call a notifier if the target
++ID or instance ID (or both) of the event match the ones implied by the
++notifier IDs (in case of target ID, the target ID of the registry), by
++providing an event mask (see |ssam_event_mask|).
++
++In general, the target ID of the registry is also the target ID of the
++enabled event (with the notable exception being keyboard input events on the
++Surface Laptop 1 and 2, which are enabled via a registry with target ID 1,
++but provide events with target ID 2).
++
++A full example for registering an event notifier and handling received
++events is provided below:
++
++.. code-block:: c
++
++   u32 notifier_callback(struct ssam_event_notifier *nf,
++                         const struct ssam_event *event)
++   {
++           int status = ...
++
++           /* Handle the event here ... */
++
++           /* Convert return value and indicate that we handled the event. */
++           return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++   }
++
++   int setup_notifier(struct ssam_device *sdev,
++                      struct ssam_event_notifier *nf)
++   {
++           /* Set priority wrt. other handlers of same target category. */
++           nf->base.priority = 1;
++
++           /* Set event/notifier callback. */
++           nf->base.fn = notifier_callback;
++
++           /* Specify event registry, i.e. how events get enabled/disabled. */
++           nf->event.reg = SSAM_EVENT_REGISTRY_KIP;
++
++           /* Specify which event to enable/disable */
++           nf->event.id.target_category = sdev->uid.category;
++           nf->event.id.instance = sdev->uid.instance;
++
++           /*
++            * Specify for which events the notifier callback gets executed.
++            * This essentially tells the core if it can skip notifiers that
++            * don't have target or instance IDs matching those of the event.
++            */
++           nf->event.mask = SSAM_EVENT_MASK_STRICT;
++
++           /* Specify event flags. */
++           nf->event.flags = SSAM_EVENT_SEQUENCED;
++
++           return ssam_notifier_register(sdev->ctrl, nf);
++   }
++
++Multiple event notifiers can be registered for the same event. The event
++handler core takes care of enabling and disabling events when notifiers are
++registered and unregistered, by keeping track of how many notifiers for a
++specific event (combination of registry, event target category, and event
++instance ID) are currently registered. This means that a specific event will
++be enabled when the first notifier for it is being registered and disabled
++when the last notifier for it is being unregistered. Note that the event
++flags are therefore only used on the first registered notifier; however, one
++should take care that notifiers for a specific event are always registered
++with the same flags, as it is considered a bug to do otherwise.
+diff --git a/Documentation/driver-api/surface_aggregator/clients/index.rst b/Documentation/driver-api/surface_aggregator/clients/index.rst
+new file mode 100644
+index 000000000000..31e026d96102
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/clients/index.rst
+@@ -0,0 +1,10 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++===========================
++Client Driver Documentation
++===========================
++
++This is the documentation for client drivers themselves. Refer to
++:doc:`../client` for documentation on how to write client drivers.
++
++.. Place documentation for individual client drivers here.
+diff --git a/Documentation/driver-api/surface_aggregator/index.rst b/Documentation/driver-api/surface_aggregator/index.rst
+new file mode 100644
+index 000000000000..6f3e1094904d
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/index.rst
+@@ -0,0 +1,21 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++=======================================
++Surface System Aggregator Module (SSAM)
++=======================================
++
++.. toctree::
++   :maxdepth: 2
++
++   overview
++   client
++   clients/index
++   ssh
++   internal
++
++.. only::  subproject and html
++
++   Indices
++   =======
++
++   * :ref:`genindex`
+diff --git a/Documentation/driver-api/surface_aggregator/internal-api.rst b/Documentation/driver-api/surface_aggregator/internal-api.rst
+new file mode 100644
+index 000000000000..639a67b5a392
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/internal-api.rst
+@@ -0,0 +1,67 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++==========================
++Internal API Documentation
++==========================
++
++.. contents::
++    :depth: 2
++
++
++Packet Transport Layer
++======================
++
++.. kernel-doc:: drivers/platform/surface/aggregator/ssh_parser.h
++    :internal:
++
++.. kernel-doc:: drivers/platform/surface/aggregator/ssh_parser.c
++    :internal:
++
++.. kernel-doc:: drivers/platform/surface/aggregator/ssh_msgb.h
++    :internal:
++
++.. kernel-doc:: drivers/platform/surface/aggregator/ssh_packet_layer.h
++    :internal:
++
++.. kernel-doc:: drivers/platform/surface/aggregator/ssh_packet_layer.c
++    :internal:
++
++
++Request Transport Layer
++=======================
++
++.. kernel-doc:: drivers/platform/surface/aggregator/ssh_request_layer.h
++    :internal:
++
++.. kernel-doc:: drivers/platform/surface/aggregator/ssh_request_layer.c
++    :internal:
++
++
++Controller
++==========
++
++.. kernel-doc:: drivers/platform/surface/aggregator/controller.h
++    :internal:
++
++.. kernel-doc:: drivers/platform/surface/aggregator/controller.c
++    :internal:
++
++
++Client Device Bus
++=================
++
++.. kernel-doc:: drivers/platform/surface/aggregator/bus.c
++    :internal:
++
++
++Core
++====
++
++.. kernel-doc:: drivers/platform/surface/aggregator/core.c
++    :internal:
++
++
++Trace Helpers
++=============
++
++.. kernel-doc:: drivers/platform/surface/aggregator/trace.h
+diff --git a/Documentation/driver-api/surface_aggregator/internal.rst b/Documentation/driver-api/surface_aggregator/internal.rst
+new file mode 100644
+index 000000000000..72704734982a
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/internal.rst
+@@ -0,0 +1,577 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |ssh_ptl| replace:: :c:type:`struct ssh_ptl <ssh_ptl>`
++.. |ssh_ptl_submit| replace:: :c:func:`ssh_ptl_submit`
++.. |ssh_ptl_cancel| replace:: :c:func:`ssh_ptl_cancel`
++.. |ssh_ptl_shutdown| replace:: :c:func:`ssh_ptl_shutdown`
++.. |ssh_ptl_rx_rcvbuf| replace:: :c:func:`ssh_ptl_rx_rcvbuf`
++.. |ssh_rtl| replace:: :c:type:`struct ssh_rtl <ssh_rtl>`
++.. |ssh_rtl_submit| replace:: :c:func:`ssh_rtl_submit`
++.. |ssh_rtl_cancel| replace:: :c:func:`ssh_rtl_cancel`
++.. |ssh_rtl_shutdown| replace:: :c:func:`ssh_rtl_shutdown`
++.. |ssh_packet| replace:: :c:type:`struct ssh_packet <ssh_packet>`
++.. |ssh_packet_get| replace:: :c:func:`ssh_packet_get`
++.. |ssh_packet_put| replace:: :c:func:`ssh_packet_put`
++.. |ssh_packet_ops| replace:: :c:type:`struct ssh_packet_ops <ssh_packet_ops>`
++.. |ssh_packet_base_priority| replace:: :c:type:`enum ssh_packet_base_priority <ssh_packet_base_priority>`
++.. |ssh_packet_flags| replace:: :c:type:`enum ssh_packet_flags <ssh_packet_flags>`
++.. |SSH_PACKET_PRIORITY| replace:: :c:func:`SSH_PACKET_PRIORITY`
++.. |ssh_frame| replace:: :c:type:`struct ssh_frame <ssh_frame>`
++.. |ssh_command| replace:: :c:type:`struct ssh_command <ssh_command>`
++.. |ssh_request| replace:: :c:type:`struct ssh_request <ssh_request>`
++.. |ssh_request_get| replace:: :c:func:`ssh_request_get`
++.. |ssh_request_put| replace:: :c:func:`ssh_request_put`
++.. |ssh_request_ops| replace:: :c:type:`struct ssh_request_ops <ssh_request_ops>`
++.. |ssh_request_init| replace:: :c:func:`ssh_request_init`
++.. |ssh_request_flags| replace:: :c:type:`enum ssh_request_flags <ssh_request_flags>`
++.. |ssam_controller| replace:: :c:type:`struct ssam_controller <ssam_controller>`
++.. |ssam_device| replace:: :c:type:`struct ssam_device <ssam_device>`
++.. |ssam_device_driver| replace:: :c:type:`struct ssam_device_driver <ssam_device_driver>`
++.. |ssam_client_bind| replace:: :c:func:`ssam_client_bind`
++.. |ssam_client_link| replace:: :c:func:`ssam_client_link`
++.. |ssam_request_sync| replace:: :c:type:`struct ssam_request_sync <ssam_request_sync>`
++.. |ssam_event_registry| replace:: :c:type:`struct ssam_event_registry <ssam_event_registry>`
++.. |ssam_event_id| replace:: :c:type:`struct ssam_event_id <ssam_event_id>`
++.. |ssam_nf| replace:: :c:type:`struct ssam_nf <ssam_nf>`
++.. |ssam_nf_refcount_inc| replace:: :c:func:`ssam_nf_refcount_inc`
++.. |ssam_nf_refcount_dec| replace:: :c:func:`ssam_nf_refcount_dec`
++.. |ssam_notifier_register| replace:: :c:func:`ssam_notifier_register`
++.. |ssam_notifier_unregister| replace:: :c:func:`ssam_notifier_unregister`
++.. |ssam_cplt| replace:: :c:type:`struct ssam_cplt <ssam_cplt>`
++.. |ssam_event_queue| replace:: :c:type:`struct ssam_event_queue <ssam_event_queue>`
++.. |ssam_request_sync_submit| replace:: :c:func:`ssam_request_sync_submit`
++
++=====================
++Core Driver Internals
++=====================
++
++Architectural overview of the Surface System Aggregator Module (SSAM) core
++and Surface Serial Hub (SSH) driver. For the API documentation, refer to:
++
++.. toctree::
++   :maxdepth: 2
++
++   internal-api
++
++
++Overview
++========
++
++The SSAM core implementation is structured in layers, somewhat following the
++SSH protocol structure:
++
++Lower-level packet transport is implemented in the *packet transport layer
++(PTL)*, directly building on top of the serial device (serdev)
++infrastructure of the kernel. As the name indicates, this layer deals with
++the packet transport logic and handles things like packet validation, packet
++acknowledgment (ACKing), packet (retransmission) timeouts, and relaying
++packet payloads to higher-level layers.
++
++Above this sits the *request transport layer (RTL)*. This layer is centered
++around command-type packet payloads, i.e. requests (sent from host to EC),
++responses of the EC to those requests, and events (sent from EC to host).
++It, specifically, distinguishes events from request responses, matches
++responses to their corresponding requests, and implements request timeouts.
++
++The *controller* layer builds on top of this and essentially decides
++how request responses and, especially, events are dealt with. It provides an
++event notifier system, handles event activation/deactivation, provides a
++workqueue for event and asynchronous request completion, and also manages
++the message counters required for building command messages (``SEQ``,
++``RQID``). This layer basically provides a fundamental interface to the SAM
++EC for use in other kernel drivers.
++
++While the controller layer already provides an interface for other kernel
++drivers, the client *bus* extends this interface to provide support for
++native SSAM devices, i.e. devices that are not defined in ACPI and not
++implemented as platform devices, via |ssam_device| and |ssam_device_driver|,
++simplifying management of client devices and client drivers.
++
++Refer to :doc:`client` for documentation regarding the client device/driver
++API and interface options for other kernel drivers. It is recommended to
++familiarize oneself with that chapter and the :doc:`ssh` before continuing
++with the architectural overview below.
++
++
++Packet Transport Layer
++======================
++
++The packet transport layer is represented via |ssh_ptl| and is structured
++around the following key concepts:
++
++Packets
++-------
++
++Packets are the fundamental transmission unit of the SSH protocol. They are
++managed by the packet transport layer, which is essentially the lowest layer
++of the driver and is built upon by other components of the SSAM core.
++Packets to be transmitted by the SSAM core are represented via |ssh_packet|
++(in contrast, packets received by the core do not have any specific
++structure and are managed entirely via the raw |ssh_frame|).
++
++This structure contains the required fields to manage the packet inside the
++transport layer, as well as a reference to the buffer containing the data to
++be transmitted (i.e. the message wrapped in |ssh_frame|). Most notably, it
++contains an internal reference count, which is used for managing its
++lifetime (accessible via |ssh_packet_get| and |ssh_packet_put|). When this
++counter reaches zero, the ``release()`` callback provided to the packet via
++its |ssh_packet_ops| reference is executed, which may then deallocate the
++packet or its enclosing structure (e.g. |ssh_request|).
++
++In addition to the ``release`` callback, the |ssh_packet_ops| reference also
++provides a ``complete()`` callback, which is run once the packet has been
++completed and provides the status of this completion, i.e. zero on success
++or a negative errno value in case of an error. Once the packet has been
++submitted to the packet transport layer, the ``complete()`` callback is
++always guaranteed to be executed before the ``release()`` callback, i.e. the
++packet will always be completed, either successfully, with an error, or due
++to cancellation, before it will be released.
++
++The state of a packet is managed via its ``state`` flags
++(|ssh_packet_flags|), which also contains the packet type. In particular,
++the following bits are noteworthy:
++
++* ``SSH_PACKET_SF_LOCKED_BIT``: This bit is set when completion, either
++  through error or success, is imminent. It indicates that no further
++  references of the packet should be taken and any existing references
++  should be dropped as soon as possible. The process setting this bit is
++  responsible for removing any references to this packet from the packet
++  queue and pending set.
++
++* ``SSH_PACKET_SF_COMPLETED_BIT``: This bit is set by the process running the
++  ``complete()`` callback and is used to ensure that this callback only runs
++  once.
++
++* ``SSH_PACKET_SF_QUEUED_BIT``: This bit is set when the packet is queued on
++  the packet queue and cleared when it is dequeued.
++
++* ``SSH_PACKET_SF_PENDING_BIT``: This bit is set when the packet is added to
++  the pending set and cleared when it is removed from it.
++
++Packet Queue
++------------
++
++The packet queue is the first of the two fundamental collections in the
++packet transport layer. It is a priority queue, with priority of the
++respective packets based on the packet type (major) and number of tries
++(minor). See |SSH_PACKET_PRIORITY| for more details on the priority value.
++
++All packets to be transmitted by the transport layer must be submitted to
++this queue via |ssh_ptl_submit|. Note that this includes control packets
++sent by the transport layer itself. Internally, data packets can be
++re-submitted to this queue due to timeouts or NAK packets sent by the EC.
++
++Pending Set
++-----------
++
++The pending set is the second of the two fundamental collections in the
++packet transport layer. It stores references to packets that have already
++been transmitted, but wait for acknowledgment (e.g. the corresponding ACK
++packet) by the EC.
++
++Note that a packet may both be pending and queued if it has been
++re-submitted due to a packet acknowledgment timeout or NAK. On such a
++re-submission, packets are not removed from the pending set.
++
++Transmitter Thread
++------------------
++
++The transmitter thread is responsible for most of the actual work regarding
++packet transmission. In each iteration, it (waits for and) checks if the
++next packet on the queue (if any) can be transmitted and, if so, removes it
++from the queue and increments its counter for the number of transmission
++attempts, i.e. tries. If the packet is sequenced, i.e. requires an ACK by
++the EC, the packet is added to the pending set. Next, the packet's data is
++submitted to the serdev subsystem. In case of an error or timeout during
++this submission, the packet is completed by the transmitter thread with the
++status value of the callback set accordingly. In case the packet is
++unsequenced, i.e. does not require an ACK by the EC, the packet is completed
++with success on the transmitter thread.
++
++Transmission of sequenced packets is limited by the number of concurrently
++pending packets, i.e. a limit on how many packets may be waiting for an ACK
++from the EC in parallel. This limit is currently set to one (see :doc:`ssh`
++for the reasoning behind this). Control packets (i.e. ACK and NAK) can
++always be transmitted.
++
++Receiver Thread
++---------------
++
++Any data received from the EC is put into a FIFO buffer for further
++processing. This processing happens on the receiver thread. The receiver
++thread parses and validates the received message into its |ssh_frame| and
++corresponding payload. It prepares and submits the necessary ACK (or, on
++validation error or invalid data, NAK) packets for the received messages.
++
++This thread also handles further processing, such as matching ACK messages
++to the corresponding pending packet (via sequence ID) and completing it, as
++well as initiating re-submission of all currently pending packets on
++receival of a NAK message (re-submission in case of a NAK is similar to
++re-submission due to timeout, see below for more details on that). Note that
++the successful completion of a sequenced packet will always run on the
++receiver thread (whereas any failure-indicating completion will run on the
++process where the failure occurred).
++
++Any payload data is forwarded via a callback to the next upper layer, i.e.
++the request transport layer.
++
++Timeout Reaper
++--------------
++
++The packet acknowledgment timeout is a per-packet timeout for sequenced
++packets, started when the respective packet begins (re-)transmission (i.e.
++this timeout is armed once per transmission attempt on the transmitter
++thread). It is used to trigger re-submission or, when the number of tries
++has been exceeded, cancellation of the packet in question.
++
++This timeout is handled via a dedicated reaper task, which is essentially a
++work item (re-)scheduled to run when the next packet is set to time out. The
++work item then checks the set of pending packets for any packets that have
++exceeded the timeout and, if there are any remaining packets, re-schedules
++itself to the next appropriate point in time.
++
++If a timeout has been detected by the reaper, the packet will either be
++re-submitted if it still has some remaining tries left, or completed with
++``-ETIMEDOUT`` as status if not. Note that re-submission, in this case and
++triggered by receival of a NAK, means that the packet is added to the queue
++with a now incremented number of tries, yielding a higher priority. The
++timeout for the packet will be disabled until the next transmission attempt
++and the packet remains on the pending set.
++
++Note that due to transmission and packet acknowledgment timeouts, the packet
++transport layer is always guaranteed to make progress, if only through
++timing out packets, and will never fully block.
++
++Concurrency and Locking
++-----------------------
++
++There are two main locks in the packet transport layer: One guarding access
++to the packet queue and one guarding access to the pending set. These
++collections may only be accessed and modified under the respective lock. If
++access to both collections is needed, the pending lock must be acquired
++before the queue lock to avoid deadlocks.
++
++In addition to guarding the collections, after initial packet submission
++certain packet fields may only be accessed under one of the locks.
++Specifically, the packet priority must only be accessed while holding the
++queue lock and the packet timestamp must only be accessed while holding the
++pending lock.
++
++Other parts of the packet transport layer are guarded independently. State
++flags are managed by atomic bit operations and, if necessary, memory
++barriers. Modifications to the timeout reaper work item and expiration date
++are guarded by their own lock.
++
++The reference of the packet to the packet transport layer (``ptl``) is
++somewhat special. It is either set when the upper layer request is submitted
++or, if there is none, when the packet is first submitted. After it is set,
++it will not change its value. Functions that may run concurrently with
++submission, i.e. cancellation, can not rely on the ``ptl`` reference to be
++set. Access to it in these functions is guarded by ``READ_ONCE()``, whereas
++setting ``ptl`` is equally guarded with ``WRITE_ONCE()`` for symmetry.
++
++Some packet fields may be read outside of the respective locks guarding
++them, specifically priority and state for tracing. In those cases, proper
++access is ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``. Such
++read-only access is only allowed when stale values are not critical.
++
++With respect to the interface for higher layers, packet submission
++(|ssh_ptl_submit|), packet cancellation (|ssh_ptl_cancel|), data receival
++(|ssh_ptl_rx_rcvbuf|), and layer shutdown (|ssh_ptl_shutdown|) may always be
++executed concurrently with respect to each other. Note that packet
++submission may not run concurrently with itself for the same packet.
++Equally, shutdown and data receival may also not run concurrently with
++themselves (but may run concurrently with each other).
++
++
++Request Transport Layer
++=======================
++
++The request transport layer is represented via |ssh_rtl| and builds on top
++of the packet transport layer. It deals with requests, i.e. SSH packets sent
++by the host containing a |ssh_command| as frame payload. This layer
++separates responses to requests from events, which are also sent by the EC
++via a |ssh_command| payload. While responses are handled in this layer,
++events are relayed to the next upper layer, i.e. the controller layer, via
++the corresponding callback. The request transport layer is structured around
++the following key concepts:
++
++Request
++-------
++
++Requests are packets with a command-type payload, sent from host to EC to
++query data from or trigger an action on it (or both simultaneously). They
++are represented by |ssh_request|, wrapping the underlying |ssh_packet|
++storing its message data (i.e. SSH frame with command payload). Note that
++all top-level representations, e.g. |ssam_request_sync| are built upon this
++struct.
++
++As |ssh_request| extends |ssh_packet|, its lifetime is also managed by the
++reference counter inside the packet struct (which can be accessed via
++|ssh_request_get| and |ssh_request_put|). Once the counter reaches zero, the
++``release()`` callback of the |ssh_request_ops| reference of the request is
++called.
++
++Requests can have an optional response that is equally sent via a SSH
++message with command-type payload (from EC to host). The party constructing
++the request must know if a response is expected and mark this in the request
++flags provided to |ssh_request_init|, so that the request transport layer
++can wait for this response.
++
++Similar to |ssh_packet|, |ssh_request| also has a ``complete()`` callback
++provided via its request ops reference and is guaranteed to be completed
++before it is released once it has been submitted to the request transport
++layer via |ssh_rtl_submit|. For a request without a response, successful
++completion will occur once the underlying packet has been successfully
++transmitted by the packet transport layer (i.e. from within the packet
++completion callback). For a request with response, successful completion
++will occur once the response has been received and matched to the request
++via its request ID (which happens on the packet layer's data-received
++callback running on the receiver thread). If the request is completed with
++an error, the status value will be set to the corresponding (negative) errno
++value.
++
++The state of a request is again managed via its ``state`` flags
++(|ssh_request_flags|), which also encode the request type. In particular,
++the following bits are noteworthy:
++
++* ``SSH_REQUEST_SF_LOCKED_BIT``: This bit is set when completion, either
++  through error or success, is imminent. It indicates that no further
++  references of the request should be taken and any existing references
++  should be dropped as soon as possible. The process setting this bit is
++  responsible for removing any references to this request from the request
++  queue and pending set.
++
++* ``SSH_REQUEST_SF_COMPLETED_BIT``: This bit is set by the process running the
++  ``complete()`` callback and is used to ensure that this callback only runs
++  once.
++
++* ``SSH_REQUEST_SF_QUEUED_BIT``: This bit is set when the request is queued on
++  the request queue and cleared when it is dequeued.
++
++* ``SSH_REQUEST_SF_PENDING_BIT``: This bit is set when the request is added to
++  the pending set and cleared when it is removed from it.
++
++Request Queue
++-------------
++
++The request queue is the first of the two fundamental collections in the
++request transport layer. In contrast to the packet queue of the packet
++transport layer, it is not a priority queue and the simple first come first
++serve principle applies.
++
++All requests to be transmitted by the request transport layer must be
++submitted to this queue via |ssh_rtl_submit|. Once submitted, requests may
++not be re-submitted, and will not be re-submitted automatically on timeout.
++Instead, the request is completed with a timeout error. If desired, the
++caller can create and submit a new request for another try, but it must not
++submit the same request again.
++
++Pending Set
++-----------
++
++The pending set is the second of the two fundamental collections in the
++request transport layer. This collection stores references to all pending
++requests, i.e. requests awaiting a response from the EC (similar to what the
++pending set of the packet transport layer does for packets).
++
++Transmitter Task
++----------------
++
++The transmitter task is scheduled when a new request is available for
++transmission. It checks if the next request on the request queue can be
++transmitted and, if so, submits its underlying packet to the packet
++transport layer. This check ensures that only a limited number of
++requests can be pending, i.e. waiting for a response, at the same time. If
++the request requires a response, the request is added to the pending set
++before its packet is submitted.
++
++Packet Completion Callback
++--------------------------
++
++The packet completion callback is executed once the underlying packet of a
++request has been completed. In case of an error completion, the
++corresponding request is completed with the error value provided in this
++callback.
++
++On successful packet completion, further processing depends on the request.
++If the request expects a response, it is marked as transmitted and the
++request timeout is started. If the request does not expect a response, it is
++completed with success.
++
++Data-Received Callback
++----------------------
++
++The data received callback notifies the request transport layer of data
++being received by the underlying packet transport layer via a data-type
++frame. In general, this is expected to be a command-type payload.
++
++If the request ID of the command is one of the request IDs reserved for
++events (one to ``SSH_NUM_EVENTS``, inclusive), it is forwarded to the
++event callback registered in the request transport layer. If the request ID
++indicates a response to a request, the respective request is looked up in
++the pending set and, if found and marked as transmitted, completed with
++success.
++
++Timeout Reaper
++--------------
++
++The request-response-timeout is a per-request timeout for requests expecting
++a response. It is used to ensure that a request does not wait indefinitely
++on a response from the EC and is started after the underlying packet has
++been successfully completed.
++
++This timeout is, similar to the packet acknowledgment timeout on the packet
++transport layer, handled via a dedicated reaper task. This task is
++essentially a work-item (re-)scheduled to run when the next request is set
++to time out. The work item then scans the set of pending requests for any
++requests that have timed out and completes them with ``-ETIMEDOUT`` as
++status. Requests will not be re-submitted automatically. Instead, the issuer
++of the request must construct and submit a new request, if so desired.
++
++Note that this timeout, in combination with packet transmission and
++acknowledgment timeouts, guarantees that the request layer will always make
++progress, even if only through timing out packets, and never fully block.
++
++Concurrency and Locking
++-----------------------
++
++Similar to the packet transport layer, there are two main locks in the
++request transport layer: One guarding access to the request queue and one
++guarding access to the pending set. These collections may only be accessed
++and modified under the respective lock.
++
++Other parts of the request transport layer are guarded independently. State
++flags are (again) managed by atomic bit operations and, if necessary, memory
++barriers. Modifications to the timeout reaper work item and expiration date
++are guarded by their own lock.
++
++Some request fields may be read outside of the respective locks guarding
++them, specifically the state for tracing. In those cases, proper access is
++ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``. Such read-only
++access is only allowed when stale values are not critical.
++
++With respect to the interface for higher layers, request submission
++(|ssh_rtl_submit|), request cancellation (|ssh_rtl_cancel|), and layer
++shutdown (|ssh_rtl_shutdown|) may always be executed concurrently with
++respect to each other. Note that request submission may not run concurrently
++with itself for the same request (and also may only be called once per
++request). Equally, shutdown may also not run concurrently with itself.
++
++
++Controller Layer
++================
++
++The controller layer builds on the request transport layer to provide an
++easy-to-use interface for client drivers. It is represented by
++|ssam_controller| and the SSH driver. While the lower level transport layers
++take care of transmitting and handling packets and requests, the controller
++layer takes on more of a management role. Specifically, it handles device
++initialization, power management, and event handling, including event
++delivery and registration via the (event) completion system (|ssam_cplt|).
++
++Event Registration
++------------------
++
++In general, an event (or rather a class of events) has to be explicitly
++requested by the host before the EC will send it (HID input events seem to
++be the exception). This is done via an event-enable request (similarly,
++events should be disabled via an event-disable request once no longer
++desired).
++
++The specific request used to enable (or disable) an event is given via an
++event registry, i.e. the governing authority of this event (so to speak),
++represented by |ssam_event_registry|. As parameters to this request, the
++target category and, depending on the event registry, instance ID of the
++event to be enabled must be provided. This (optional) instance ID must be
++zero if the registry does not use it. Together, target category and instance
++ID form the event ID, represented by |ssam_event_id|. In short, both, event
++registry and event ID, are required to uniquely identify a respective class
++of events.
++
++Note that a further *request ID* parameter must be provided for the
++enable-event request. This parameter does not influence the class of events
++being enabled, but instead is set as the request ID (RQID) on each event of
++this class sent by the EC. It is used to identify events (as a limited
++number of request IDs is reserved for use in events only, specifically one
++to ``SSH_NUM_EVENTS`` inclusively) and also map events to their specific
++class. Currently, the controller always sets this parameter to the target
++category specified in |ssam_event_id|.
++
++As multiple client drivers may rely on the same (or overlapping) classes of
++events and enable/disable calls are strictly binary (i.e. on/off), the
++controller has to manage access to these events. It does so via reference
++counting, storing the counter inside an RB-tree based mapping with event
++registry and ID as key (there is no known list of valid event registry and
++event ID combinations). See |ssam_nf|, |ssam_nf_refcount_inc|, and
++|ssam_nf_refcount_dec| for details.
++
++This management is done together with notifier registration (described in
++the next section) via the top-level |ssam_notifier_register| and
++|ssam_notifier_unregister| functions.
++
++Event Delivery
++--------------
++
++To receive events, a client driver has to register an event notifier via
++|ssam_notifier_register|. This increments the reference counter for that
++specific class of events (as detailed in the previous section), enables the
++class on the EC (if it has not been enabled already), and installs the
++provided notifier callback.
++
++Notifier callbacks are stored in lists, with one (RCU) list per target
++category (provided via the event ID; NB: there is a fixed known number of
++target categories). There is no known association from the combination of
++event registry and event ID to the command data (target ID, target category,
++command ID, and instance ID) that can be provided by an event class, apart
++from target category and instance ID given via the event ID.
++
++Note that due to the way notifiers are (or rather have to be) stored, client
++drivers may receive events that they have not requested and need to account
++for them. Specifically, they will, by default, receive all events from the
++same target category. To simplify dealing with this, filtering of events by
++target ID (provided via the event registry) and instance ID (provided via
++the event ID) can be requested when registering a notifier. This filtering
++is applied when iterating over the notifiers at the time they are executed.
++
++All notifier callbacks are executed on a dedicated workqueue, the so-called
++completion workqueue. After an event has been received via the callback
++installed in the request layer (running on the receiver thread of the packet
++transport layer), it will be put on its respective event queue
++(|ssam_event_queue|). From this event queue the completion work item of that
++queue (running on the completion workqueue) will pick up the event and
++execute the notifier callback. This is done to avoid blocking on the
++receiver thread.
++
++There is one event queue per combination of target ID and target category.
++This is done to ensure that notifier callbacks are executed in sequence for
++events of the same target ID and target category. Callbacks can be executed
++in parallel for events with a different combination of target ID and target
++category.
++
++Concurrency and Locking
++-----------------------
++
++Most of the concurrency related safety guarantees of the controller are
++provided by the lower-level request transport layer. In addition to this,
++event (un-)registration is guarded by its own lock.
++
++Access to the controller state is guarded by the state lock. This lock is a
++read/write semaphore. The reader part can be used to ensure that the state
++does not change while functions that depend on the state staying the same
++(e.g. |ssam_notifier_register|, |ssam_notifier_unregister|,
++|ssam_request_sync_submit|, and derivatives) are executed, if this guarantee
++is not already provided otherwise (e.g. through |ssam_client_bind| or
++|ssam_client_link|). The writer part guards any transitions that will change
++the state, i.e. initialization, destruction, suspension, and resumption.
++
++The controller state may be accessed (read-only) outside the state lock for
++smoke-testing against invalid API usage (e.g. in |ssam_request_sync_submit|).
++Note that such checks are not supposed to (and will not) protect against all
++invalid usages, but rather aim to help catch them. In those cases, proper
++variable access is ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``.
++
++Assuming any preconditions on the state not changing have been satisfied,
++all non-initialization and non-shutdown functions may run concurrently with
++each other. This includes |ssam_notifier_register|, |ssam_notifier_unregister|,
++|ssam_request_sync_submit|, as well as all functions building on top of those.
+diff --git a/Documentation/driver-api/surface_aggregator/overview.rst b/Documentation/driver-api/surface_aggregator/overview.rst
+new file mode 100644
+index 000000000000..1e9d57e50063
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/overview.rst
+@@ -0,0 +1,77 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++========
++Overview
++========
++
++The Surface/System Aggregator Module (SAM, SSAM) is an (arguably *the*)
++embedded controller (EC) on Microsoft Surface devices. It has been originally
++introduced on 4th generation devices (Surface Pro 4, Surface Book 1), but
++its responsibilities and feature-set have since been expanded significantly
++with the following generations.
++
++
++Features and Integration
++========================
++
++Not much is currently known about SAM on 4th generation devices (Surface Pro
++4, Surface Book 1), due to the use of a different communication interface
++between host and EC (as detailed below). On 5th (Surface Pro 2017, Surface
++Book 2, Surface Laptop 1) and later generation devices, SAM is responsible
++for providing battery information (both current status and static values,
++such as maximum capacity etc.), as well as an assortment of temperature
++sensors (e.g. skin temperature) and cooling/performance-mode setting to the
++host. On the Surface Book 2, specifically, it additionally provides an
++interface for properly handling clipboard detachment (i.e. separating the
++display part from the keyboard part of the device), on the Surface Laptop 1
++and 2 it is required for keyboard HID input. This HID subsystem has been
++restructured for 7th generation devices and on those, specifically Surface
++Laptop 3 and Surface Book 3, is responsible for all major HID input (i.e.
++keyboard and touchpad).
++
++While features have not changed much on a coarse level since the 5th
++generation, internal interfaces have undergone some rather large changes. On
++5th and 6th generation devices, both battery and temperature information is
++exposed to ACPI via a shim driver (referred to as Surface ACPI Notify, or
++SAN), translating ACPI generic serial bus write-/read-accesses to SAM
++requests. On 7th generation devices, this additional layer is gone and these
++devices require a driver hooking directly into the SAM interface. Equally,
++on newer generations, fewer devices are declared in ACPI, making them a bit
++harder to discover and requiring us to hard-code a sort of device registry.
++Due to this, a SSAM bus and subsystem with client devices
++(:c:type:`struct ssam_device <ssam_device>`) has been implemented.
++
++
++Communication
++=============
++
++The type of communication interface between host and EC depends on the
++generation of the Surface device. On 4th generation devices, host and EC
++communicate via HID, specifically using a HID-over-I2C device, whereas on
++5th and later generations, communication takes place via a USART serial
++device. In accordance with the drivers found on other operating systems, we
++refer to the serial device and its driver as Surface Serial Hub (SSH). When
++needed, we differentiate between both types of SAM by referring to them as
++SAM-over-SSH and SAM-over-HID.
++
++Currently, this subsystem only supports SAM-over-SSH. The SSH communication
++interface is described in more detail below. The HID interface has not been
++reverse engineered yet and it is, at the moment, unclear how many (and
++which) concepts of the SSH interface detailed below can be transferred to
++it.
++
++Surface Serial Hub
++------------------
++
++As already elaborated above, the Surface Serial Hub (SSH) is the
++communication interface for SAM on 5th- and all later-generation Surface
++devices. On the highest level, communication can be separated into two main
++types: Requests, messages sent from host to EC that may trigger a direct
++response from the EC (explicitly associated with the request), and events
++(sometimes also referred to as notifications), sent from EC to host without
++being a direct response to a previous request. We may also refer to requests
++without response as commands. In general, events need to be enabled via one
++of multiple dedicated requests before they are sent by the EC.
++
++See :doc:`ssh` for a more technical protocol documentation and
++:doc:`internal` for an overview of the internal driver architecture.
+diff --git a/Documentation/driver-api/surface_aggregator/ssh.rst b/Documentation/driver-api/surface_aggregator/ssh.rst
+new file mode 100644
+index 000000000000..bf007d6c9873
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/ssh.rst
+@@ -0,0 +1,344 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |u8| replace:: :c:type:`u8 <u8>`
++.. |u16| replace:: :c:type:`u16 <u16>`
++.. |TYPE| replace:: ``TYPE``
++.. |LEN| replace:: ``LEN``
++.. |SEQ| replace:: ``SEQ``
++.. |SYN| replace:: ``SYN``
++.. |NAK| replace:: ``NAK``
++.. |ACK| replace:: ``ACK``
++.. |DATA| replace:: ``DATA``
++.. |DATA_SEQ| replace:: ``DATA_SEQ``
++.. |DATA_NSQ| replace:: ``DATA_NSQ``
++.. |TC| replace:: ``TC``
++.. |TID| replace:: ``TID``
++.. |IID| replace:: ``IID``
++.. |RQID| replace:: ``RQID``
++.. |CID| replace:: ``CID``
++
++===========================
++Surface Serial Hub Protocol
++===========================
++
++The Surface Serial Hub (SSH) is the central communication interface for the
++embedded Surface Aggregator Module controller (SAM or EC), found on newer
++Surface generations. We will refer to this protocol and interface as
++SAM-over-SSH, as opposed to SAM-over-HID for the older generations.
++
++On Surface devices with SAM-over-SSH, SAM is connected to the host via UART
++and defined in ACPI as device with ID ``MSHW0084``. On these devices,
++significant functionality is provided via SAM, including access to battery
++and power information and events, thermal read-outs and events, and many
++more. For Surface Laptops, keyboard input is handled via HID directed
++through SAM, on the Surface Laptop 3 and Surface Book 3 this also includes
++touchpad input.
++
++Note that the standard disclaimer for this subsystem also applies to this
++document: All of this has been reverse-engineered and may thus be erroneous
++and/or incomplete.
++
++All CRCs used in the following are two-byte ``crc_ccitt_false(0xffff, ...)``.
++All multi-byte values are little-endian, there is no implicit padding between
++values.
++
++
++SSH Packet Protocol: Definitions
++================================
++
++The fundamental communication unit of the SSH protocol is a frame
++(:c:type:`struct ssh_frame <ssh_frame>`). A frame consists of the following
++fields, packed together and in order:
++
++.. flat-table:: SSH Frame
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - |TYPE|
++     - |u8|
++     - Type identifier of the frame.
++
++   * - |LEN|
++     - |u16|
++     - Length of the payload associated with the frame.
++
++   * - |SEQ|
++     - |u8|
++     - Sequence ID (see explanation below).
++
++Each frame structure is followed by a CRC over this structure. The CRC over
++the frame structure (|TYPE|, |LEN|, and |SEQ| fields) is placed directly
++after the frame structure and before the payload. The payload is followed by
++its own CRC (over all payload bytes). If the payload is not present (i.e.
++the frame has ``LEN=0``), the CRC of the payload is still present and will
++evaluate to ``0xffff``. The |LEN| field does not include any of the CRCs; it
++equals the number of bytes between the CRC of the frame and the CRC of the
++payload.
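++
++Expressed as a C structure, the frame described above corresponds roughly
++to the following packed layout (shown for illustration; the field names may
++differ from the in-kernel definition of
++:c:type:`struct ssh_frame <ssh_frame>`):
++
++.. code-block:: c
++
++   struct ssh_frame {
++           u8 type;        /* TYPE: frame type identifier */
++           __le16 len;     /* LEN: payload length, little-endian */
++           u8 seq;         /* SEQ: sequence ID */
++   } __packed;             /* followed on the wire by its two-byte CRC */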
++
++Additionally, the following fixed two-byte sequences are used:
++
++.. flat-table:: SSH Byte Sequences
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Name
++     - Value
++     - Description
++
++   * - |SYN|
++     - ``[0xAA, 0x55]``
++     - Synchronization bytes.
++
++A message consists of |SYN|, followed by the frame (|TYPE|, |LEN|, |SEQ| and
++CRC) and, if specified in the frame (i.e. ``LEN > 0``), payload bytes,
++followed finally, regardless of whether the payload is present, by the
++payload CRC. The
++messages corresponding to an exchange are, in part, identified by having the
++same sequence ID (|SEQ|), stored inside the frame (more on this in the next
++section). The sequence ID is a wrapping counter.
++
++A frame can have the following types
++(:c:type:`enum ssh_frame_type <ssh_frame_type>`):
++
++.. flat-table:: SSH Frame Types
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Name
++     - Value
++     - Short Description
++
++   * - |NAK|
++     - ``0x04``
++     - Sent on error in previously received message.
++
++   * - |ACK|
++     - ``0x40``
++     - Sent to acknowledge receival of |DATA| frame.
++
++   * - |DATA_SEQ|
++     - ``0x80``
++     - Sent to transfer data. Sequenced.
++
++   * - |DATA_NSQ|
++     - ``0x00``
++     - Same as |DATA_SEQ|, but does not need to be ACKed.
++
++Both |NAK|- and |ACK|-type frames are used to control flow of messages and
++thus do not carry a payload. |DATA_SEQ|- and |DATA_NSQ|-type frames on the
++other hand must carry a payload. The flow sequence and interaction of
++different frame types will be described in more depth in the next section.
++
++
++SSH Packet Protocol: Flow Sequence
++==================================
++
++Each exchange begins with |SYN|, followed by a |DATA_SEQ|- or
++|DATA_NSQ|-type frame, followed by its CRC, payload, and payload CRC. In
++case of a |DATA_NSQ|-type frame, the exchange is then finished. In case of a
++|DATA_SEQ|-type frame, the receiving party has to acknowledge receival of
++the frame by responding with a message containing an |ACK|-type frame with
++the same sequence ID of the |DATA| frame. In other words, the sequence ID of
++the |ACK| frame specifies the |DATA| frame to be acknowledged. In case of an
++error, e.g. an invalid CRC, the receiving party responds with a message
++containing an |NAK|-type frame. As the sequence ID of the previous data
++frame, for which an error is indicated via the |NAK| frame, cannot be relied
++upon, the sequence ID of the |NAK| frame should not be used and is set to
++zero. After receival of an |NAK| frame, the sending party should re-send all
++outstanding (non-ACKed) messages.
++
++Sequence IDs are not synchronized between the two parties, meaning that they
++are managed independently for each party. Identifying the messages
++corresponding to a single exchange thus relies on the sequence ID as well as
++the type of the message, and the context. Specifically, the sequence ID is
++used to associate an ``ACK`` with its ``DATA_SEQ``-type frame, but not
++``DATA_SEQ``- or ``DATA_NSQ``-type frames with other ``DATA``-type frames.
++
++An example exchange might look like this:
++
++::
++
++    tx: -- SYN FRAME(D) CRC(F) PAYLOAD CRC(P) -----------------------------
++    rx: ------------------------------------- SYN FRAME(A) CRC(F) CRC(P) --
++
++where both frames have the same sequence ID (``SEQ``). Here, ``FRAME(D)``
++indicates a |DATA_SEQ|-type frame, ``FRAME(A)`` an ``ACK``-type frame,
++``CRC(F)`` the CRC over the previous frame, ``CRC(P)`` the CRC over the
++previous payload. In case of an error, the exchange would look like this:
++
++::
++
++    tx: -- SYN FRAME(D) CRC(F) PAYLOAD CRC(P) -----------------------------
++    rx: ------------------------------------- SYN FRAME(N) CRC(F) CRC(P) --
++
++upon which the sender should re-send the message. ``FRAME(N)`` indicates an
++|NAK|-type frame. Note that the sequence ID of the |NAK|-type frame is fixed
++to zero. For |DATA_NSQ|-type frames, both exchanges are the same:
++
++::
++
++    tx: -- SYN FRAME(DATA_NSQ) CRC(F) PAYLOAD CRC(P) ----------------------
++    rx: -------------------------------------------------------------------
++
++Here, an error can be detected, but not corrected or indicated to the
++sending party. These exchanges are symmetric, i.e. switching ``rx`` and
++``tx`` results again in a valid exchange. Currently, no exchanges longer than
++the ones described above are known.
++
++
++Commands: Requests, Responses, and Events
++=========================================
++
++Commands are sent as payload inside a data frame. Currently, this is the
++only known payload type of |DATA| frames, with a payload-type value of
++``0x80`` (:c:type:`SSH_PLD_TYPE_CMD <ssh_payload_type>`).
++
++The command-type payload (:c:type:`struct ssh_command <ssh_command>`)
++consists of an eight-byte command structure, followed by optional and
++variable length command data. The length of this optional data is derived
++from the frame payload length given in the corresponding frame, i.e. it is
++``frame.len - sizeof(struct ssh_command)``. The command struct contains the
++following fields, packed together and in order:
++
++.. flat-table:: SSH Command
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - |TYPE|
++     - |u8|
++     - Type of the payload. For commands always ``0x80``.
++
++   * - |TC|
++     - |u8|
++     - Target category.
++
++   * - |TID| (out)
++     - |u8|
++     - Target ID for outgoing (host to EC) commands.
++
++   * - |TID| (in)
++     - |u8|
++     - Target ID for incoming (EC to host) commands.
++
++   * - |IID|
++     - |u8|
++     - Instance ID.
++
++   * - |RQID|
++     - |u16|
++     - Request ID.
++
++   * - |CID|
++     - |u8|
++     - Command ID.
++
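++The command structure can thus be sketched as the following packed C struct
++(a conceptual sketch only; the field names below are illustrative and not
++necessarily those used by the driver):
++
++::
++
++    struct ssh_command_sketch {
++            u8  type;     /* payload type; always 0x80 for commands */
++            u8  tc;       /* target category                        */
++            u8  tid_out;  /* target ID for outgoing (host to EC)    */
++            u8  tid_in;   /* target ID for incoming (EC to host)    */
++            u8  iid;      /* instance ID                            */
++            u16 rqid;     /* request ID                             */
++            u8  cid;      /* command ID                             */
++    } __packed;           /* followed by optional command data      */
++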
++The command struct and data, in general, do not contain any failure
++detection mechanism (e.g. CRCs); this is handled solely on the frame level.
++
++Command-type payloads are used by the host to send commands and requests to
++the EC as well as by the EC to send responses and events back to the host.
++We differentiate between requests (sent by the host), responses (sent by the
++EC in response to a request), and events (sent by the EC without a preceding
++request).
++
++Commands and events are uniquely identified by their target category
++(``TC``) and command ID (``CID``). The target category specifies a general
++category for the command (e.g. system in general, vs. battery and AC, vs.
++temperature, and so on), while the command ID specifies the command inside
++that category. Only the combination of |TC| + |CID| is unique. Additionally,
++commands have an instance ID (``IID``), which is used to differentiate
++between different sub-devices. For example, ``TC=3`` ``CID=1`` is a
++request to get the temperature on a thermal sensor, where |IID| specifies
++the respective sensor. If the instance ID is not used, it should be set to
++zero. If instance IDs are used, they, in general, start with a value of one,
++whereas zero may be used for instance-independent queries, if applicable. A
++response to a request should have the same target category, command ID, and
++instance ID as the corresponding request.
++
++Responses are matched to their corresponding request via the request ID
++(``RQID``) field. This is a 16 bit wrapping counter similar to the sequence
++ID on the frames. Note that the sequence ID of the frames for a
++request-response pair does not match. Only the request ID has to match.
++Frame-protocol-wise, these are two separate exchanges and may even be
++separated, e.g. by an event being sent after the request but before the
++response. Not all commands produce a response, and this is not detectable by
++|TC| + |CID|. It is the responsibility of the issuing party to wait for a
++response (or signal this to the communication framework, as is done in
++SAN/ACPI via the ``SNC`` flag).
++
++Events are identified by unique and reserved request IDs. These IDs should
++not be used by the host when sending a new request. They are used on the
++host to, first, detect events and, second, match them with a registered
++event handler. Request IDs for events are chosen by the host and directed to
++the EC when setting up and enabling an event source (via the
++enable-event-source request). The EC then uses the specified request ID for
++events sent from the respective source. Note that an event should still be
++identified by its target category, command ID, and, if applicable, instance
++ID, as a single event source can send multiple different event types. In
++general, however, a single target category should map to a single reserved
++event request ID.
++
++Furthermore, requests, responses, and events have an associated target ID
++(``TID``). This target ID is split into output (host to EC) and input (EC to
++host) fields, with the respective other field (e.g. the output field on incoming
++messages) set to zero. Two ``TID`` values are known: Primary (``0x01``) and
++secondary (``0x02``). In general, the response to a request should have the
++same ``TID`` value; however, the field (output vs. input) should be used in
++accordance with the direction in which the response is sent (i.e. on the input
++field, as responses are generally sent from the EC to the host).
++
++Note that, even though requests and events should be uniquely identifiable
++by target category and command ID alone, the EC may require specific
++target ID and instance ID values to accept a command. A command that is
++accepted for ``TID=1``, for example, may not be accepted for ``TID=2``
++and vice versa.
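++
++As a concrete but purely hypothetical illustration, the temperature request
++mentioned above (``TC=3``, ``CID=1``) for the second sensor, sent to the
++primary target with an arbitrarily chosen request ID of ``0x002A``, might be
++encoded as the following eight command bytes (the little-endian encoding of
++|RQID| is an assumption here, as are all concrete values other than |TC| and
++|CID|):
++
++::
++
++    u8 cmd[8] = {
++            0x80,        /* TYPE: command payload                */
++            0x03,        /* TC:   thermal                        */
++            0x01,        /* TID (out): primary target            */
++            0x00,        /* TID (in): zero for outgoing          */
++            0x02,        /* IID:  second sensor                  */
++            0x2a, 0x00,  /* RQID: 0x002a (assumed little-endian) */
++            0x01,        /* CID:  get temperature                */
++    };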
++
++
++Limitations and Observations
++============================
++
++The protocol can, in theory, handle up to ``U8_MAX`` frames in parallel,
++with up to ``U16_MAX`` pending requests (neglecting request IDs reserved for
++events). In practice, however, this is more limited. From our testing
++(although via a Python script and thus a user-space program), it seems that
++the EC can handle up to four requests in parallel (mostly) reliably at a
++time. With five or more requests in parallel, consistent discarding of
++commands (ACKed frame but no command response) has been observed. For five
++simultaneous commands, this reproducibly resulted in one command being
++dropped and four commands being handled.
++
++However, it has also been noted that, even with three requests in parallel,
++occasional frame drops happen. Apart from this, with a limit of three
++pending requests, no dropped commands (i.e. command being dropped but frame
++carrying command being ACKed) have been observed. In any case, frames (and
++possibly also commands) should be re-sent by the host if a certain timeout
++is exceeded. This is done by the EC for frames with a timeout of one second,
++up to two retries (i.e. three transmissions in total). The limit of
++retries also applies to received NAKs and, in a worst-case scenario, can
++lead to entire messages being dropped.
++
++While this also seems to work fine for pending data frames as long as no
++transmission failures occur, implementation and handling of these seem to
++depend on the assumption that there is only one non-acknowledged data frame.
++In particular, the detection of repeated frames relies on the last sequence
++number. This means that, if a frame that has been successfully received by
++the EC is sent again, e.g. due to the host not receiving an |ACK|, the EC
++will only detect this if the re-sent frame carries the sequence ID of the
++last frame received by the EC. As an example: Sending two frames with
++``SEQ=0`` and ``SEQ=1``, followed by a repetition of ``SEQ=0``, will not
++detect the second ``SEQ=0`` frame as a repetition, and will thus execute the
++command in this frame each time it is received, i.e. twice in this example.
++Sending ``SEQ=0``, ``SEQ=1`` and then repeating ``SEQ=1`` will detect the
++second ``SEQ=1`` as a repetition of the first one and ignore it, thus
++executing the contained command only once.
++
++In conclusion, this suggests a limit of at most one pending un-ACKed frame
++(per party, effectively leading to synchronous communication regarding
++frames) and at most three pending commands. The limit to synchronous frame
++transfers seems to be consistent with behavior observed on Windows.
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 2818a31d79db..c89d831e74e0 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11812,6 +11812,7 @@ M:	Maximilian Luz <luzmaximilian@gmail.com>
+ S:	Maintained
+ W:	https://github.com/linux-surface/surface-aggregator-module
+ C:	irc://chat.freenode.net/##linux-surface
++F:	Documentation/driver-api/surface_aggregator/
+ F:	drivers/platform/surface/aggregator/
+ F:	include/linux/surface_aggregator/
+ 
+-- 
+2.30.1
+
+From b1ef35b9076e896c80c2ddc77c694226db1aaee6 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:58 +0100
+Subject: [PATCH] platform/surface: Add Surface Aggregator user-space interface
+
+Add a misc-device providing user-space access to the Surface Aggregator
+EC, mainly intended for debugging, testing, and reverse-engineering.
+This interface gives user-space applications the ability to send
+requests to the EC and receive the corresponding responses.
+
+The device-file is managed by a pseudo platform-device and corresponding
+driver to avoid dependence on the dedicated bus, allowing it to be
+loaded in a minimal configuration.
+
+A Python library and scripts to access this device can be found at [1].
+
+[1]: https://github.com/linux-surface/surface-aggregator-module/tree/master/scripts/ssam
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20201221183959.1186143-9-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ .../surface_aggregator/clients/cdev.rst       |  87 +++++
+ .../surface_aggregator/clients/index.rst      |  12 +-
+ .../userspace-api/ioctl/ioctl-number.rst      |   2 +
+ MAINTAINERS                                   |   2 +
+ drivers/platform/surface/Kconfig              |  17 +
+ drivers/platform/surface/Makefile             |   1 +
+ .../surface/surface_aggregator_cdev.c         | 303 ++++++++++++++++++
+ include/uapi/linux/surface_aggregator/cdev.h  |  78 +++++
+ 8 files changed, 501 insertions(+), 1 deletion(-)
+ create mode 100644 Documentation/driver-api/surface_aggregator/clients/cdev.rst
+ create mode 100644 drivers/platform/surface/surface_aggregator_cdev.c
+ create mode 100644 include/uapi/linux/surface_aggregator/cdev.h
+
+diff --git a/Documentation/driver-api/surface_aggregator/clients/cdev.rst b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
+new file mode 100644
+index 000000000000..248c1372d879
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
+@@ -0,0 +1,87 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |u8| replace:: :c:type:`u8 <u8>`
++.. |u16| replace:: :c:type:`u16 <u16>`
++.. |ssam_cdev_request| replace:: :c:type:`struct ssam_cdev_request <ssam_cdev_request>`
++.. |ssam_cdev_request_flags| replace:: :c:type:`enum ssam_cdev_request_flags <ssam_cdev_request_flags>`
++
++==============================
++User-Space EC Interface (cdev)
++==============================
++
++The ``surface_aggregator_cdev`` module provides a misc-device for the SSAM
++controller to allow for a (more or less) direct connection from user-space to
++the SAM EC. It is intended to be used for development and debugging, and
++therefore should not be used or relied upon in any other way. Note that this
++module is not loaded automatically, but instead must be loaded manually.
++
++The provided interface is accessible through the ``/dev/surface/aggregator``
++device-file. All functionality of this interface is provided via IOCTLs.
++These IOCTLs and their respective input/output parameter structs are defined in
++``include/uapi/linux/surface_aggregator/cdev.h``.
++
++A small Python library and scripts for accessing this interface can be found
++at https://github.com/linux-surface/surface-aggregator-module/tree/master/scripts/ssam.
++
++
++Controller IOCTLs
++=================
++
++The following IOCTLs are provided:
++
++.. flat-table:: Controller IOCTLs
++   :widths: 1 1 1 1 4
++   :header-rows: 1
++
++   * - Type
++     - Number
++     - Direction
++     - Name
++     - Description
++
++   * - ``0xA5``
++     - ``1``
++     - ``WR``
++     - ``REQUEST``
++     - Perform synchronous SAM request.
++
++
++``REQUEST``
++-----------
++
++Defined as ``_IOWR(0xA5, 1, struct ssam_cdev_request)``.
++
++Executes a synchronous SAM request. The request specification is passed in
++as an argument of type |ssam_cdev_request|, which is then written to/modified
++by the IOCTL to return the status and result of the request.
++
++Request payload data must be allocated separately and is passed in via the
++``payload.data`` and ``payload.length`` members. If a response is required,
++the response buffer must be allocated by the caller and passed in via the
++``response.data`` member. The ``response.length`` member must be set to the
++capacity of this buffer or, if no response is required, to zero. Upon
++completion of the request, the call will write the response to the response
++buffer (if its capacity allows it) and overwrite the length field with the
++actual size of the response, in bytes.
++
++Additionally, if the request has a response, this must be indicated via the
++request flags, as is done with in-kernel requests. Request flags can be set
++via the ``flags`` member and the values correspond to the values found in
++|ssam_cdev_request_flags|.
++
++Finally, the status of the request itself is returned in the ``status``
++member (a negative errno value indicating failure). Note that failure
++indication of the IOCTL is separated from failure indication of the request:
++The IOCTL returns a negative status code if anything failed during setup of
++the request (``-EFAULT``) or if the provided argument or any of its fields
++are invalid (``-EINVAL``). In this case, the status value of the request
++argument may be set, providing more detail on what went wrong (e.g.
++``-ENOMEM`` for out-of-memory), but this value may also be zero. The IOCTL
++will return with a zero status code in case the request has been set up,
++submitted, and completed (i.e. handed back to user-space) successfully from
++inside the IOCTL, but the request ``status`` member may still be negative in
++case the actual execution of the request failed after it has been submitted.
++
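++The following is a minimal user-space sketch of using this IOCTL. Error
++handling is kept short, and the target category, target ID, command ID, and
++instance ID below are placeholders only, not a real request:
++
++::
++
++    #include <fcntl.h>
++    #include <stdint.h>
++    #include <stdio.h>
++    #include <string.h>
++    #include <sys/ioctl.h>
++    #include <unistd.h>
++
++    #include <linux/surface_aggregator/cdev.h>
++
++    int main(void)
++    {
++            struct ssam_cdev_request rqst;
++            unsigned char rsp[1024];
++            int fd;
++
++            fd = open("/dev/surface/aggregator", O_RDWR);
++            if (fd < 0)
++                    return 1;
++
++            memset(&rqst, 0, sizeof(rqst));
++            rqst.target_category = 0x01;                  /* placeholder */
++            rqst.target_id = 0x01;                        /* placeholder */
++            rqst.command_id = 0x01;                       /* placeholder */
++            rqst.instance_id = 0x00;                      /* placeholder */
++            rqst.flags = SSAM_CDEV_REQUEST_HAS_RESPONSE;  /* expect a response */
++            rqst.response.data = (__u64)(uintptr_t)rsp;   /* response buffer   */
++            rqst.response.length = sizeof(rsp);           /* buffer capacity   */
++
++            if (ioctl(fd, SSAM_CDEV_REQUEST, &rqst) < 0)
++                    perror("SSAM_CDEV_REQUEST");
++            else
++                    printf("status: %d, response length: %u\n",
++                           (int)rqst.status, (unsigned int)rqst.response.length);
++
++            close(fd);
++            return 0;
++    }
++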
++A full definition of the argument struct is provided below:
++
++.. kernel-doc:: include/uapi/linux/surface_aggregator/cdev.h
+diff --git a/Documentation/driver-api/surface_aggregator/clients/index.rst b/Documentation/driver-api/surface_aggregator/clients/index.rst
+index 31e026d96102..ab260ec82cfb 100644
+--- a/Documentation/driver-api/surface_aggregator/clients/index.rst
++++ b/Documentation/driver-api/surface_aggregator/clients/index.rst
+@@ -7,4 +7,14 @@ Client Driver Documentation
+ This is the documentation for client drivers themselves. Refer to
+ :doc:`../client` for documentation on how to write client drivers.
+ 
+-.. Place documentation for individual client drivers here.
++.. toctree::
++   :maxdepth: 1
++
++   cdev
++
++.. only::  subproject and html
++
++   Indices
++   =======
++
++   * :ref:`genindex`
+diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
+index a4c75a28c839..b5231d7f9200 100644
+--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
++++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
+@@ -324,6 +324,8 @@ Code  Seq#    Include File                                           Comments
+ 0xA3  90-9F  linux/dtlk.h
+ 0xA4  00-1F  uapi/linux/tee.h                                        Generic TEE subsystem
+ 0xA4  00-1F  uapi/asm/sgx.h                                          <mailto:linux-sgx@vger.kernel.org>
++0xA5  01     linux/surface_aggregator/cdev.h                         Microsoft Surface Platform System Aggregator
++                                                                     <mailto:luzmaximilian@gmail.com>
+ 0xAA  00-3F  linux/uapi/linux/userfaultfd.h
+ 0xAB  00-1F  linux/nbd.h
+ 0xAC  00-1F  linux/raw.h
+diff --git a/MAINTAINERS b/MAINTAINERS
+index c89d831e74e0..5d15622ce47d 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11814,7 +11814,9 @@ W:	https://github.com/linux-surface/surface-aggregator-module
+ C:	irc://chat.freenode.net/##linux-surface
+ F:	Documentation/driver-api/surface_aggregator/
+ F:	drivers/platform/surface/aggregator/
++F:	drivers/platform/surface/surface_aggregator_cdev.c
+ F:	include/linux/surface_aggregator/
++F:	include/uapi/linux/surface_aggregator/
+ 
+ MICROTEK X6 SCANNER
+ M:	Oliver Neukum <oliver@neukum.org>
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index ef6b4051e7c8..82fbcfedc6dc 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -41,6 +41,23 @@ config SURFACE_3_POWER_OPREGION
+ 	  This driver provides support for ACPI operation
+ 	  region of the Surface 3 battery platform driver.
+ 
++config SURFACE_AGGREGATOR_CDEV
++	tristate "Surface System Aggregator Module User-Space Interface"
++	depends on SURFACE_AGGREGATOR
++	help
++	  Provides a misc-device interface to the Surface System Aggregator
++	  Module (SSAM) controller.
++
++	  This option provides a module (called surface_aggregator_cdev), that,
++	  when loaded, will add a client device (and its respective driver) to
++	  the SSAM controller. Said client device manages a misc-device
++	  interface (/dev/surface/aggregator), which can be used by user-space
++	  tools to directly communicate with the SSAM EC by sending requests and
++	  receiving the corresponding responses.
++
++	  The provided interface is intended for debugging and development only,
++	  and should not be used otherwise.
++
+ config SURFACE_BOOK1_DGPU_SWITCH
+ 	tristate "Surface Book 1 dGPU Switch Driver"
+ 	depends on SYSFS
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index c5392098cfb9..644c7511f64d 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -8,6 +8,7 @@ obj-$(CONFIG_SURFACE3_WMI)		+= surface3-wmi.o
+ obj-$(CONFIG_SURFACE_3_BUTTON)		+= surface3_button.o
+ obj-$(CONFIG_SURFACE_3_POWER_OPREGION)	+= surface3_power.o
+ obj-$(CONFIG_SURFACE_AGGREGATOR)	+= aggregator/
++obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV)	+= surface_aggregator_cdev.o
+ obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
+ obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
+diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
+new file mode 100644
+index 000000000000..340d15b148b9
+--- /dev/null
++++ b/drivers/platform/surface/surface_aggregator_cdev.c
+@@ -0,0 +1,303 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
++ * misc device. Intended for debugging and development.
++ *
++ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/kref.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/rwsem.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#include <linux/surface_aggregator/cdev.h>
++#include <linux/surface_aggregator/controller.h>
++
++#define SSAM_CDEV_DEVICE_NAME	"surface_aggregator_cdev"
++
++struct ssam_cdev {
++	struct kref kref;
++	struct rw_semaphore lock;
++	struct ssam_controller *ctrl;
++	struct miscdevice mdev;
++};
++
++static void __ssam_cdev_release(struct kref *kref)
++{
++	kfree(container_of(kref, struct ssam_cdev, kref));
++}
++
++static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev)
++{
++	if (cdev)
++		kref_get(&cdev->kref);
++
++	return cdev;
++}
++
++static void ssam_cdev_put(struct ssam_cdev *cdev)
++{
++	if (cdev)
++		kref_put(&cdev->kref, __ssam_cdev_release);
++}
++
++static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
++{
++	struct miscdevice *mdev = filp->private_data;
++	struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
++
++	filp->private_data = ssam_cdev_get(cdev);
++	return stream_open(inode, filp);
++}
++
++static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
++{
++	ssam_cdev_put(filp->private_data);
++	return 0;
++}
++
++static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
++{
++	struct ssam_cdev_request __user *r;
++	struct ssam_cdev_request rqst;
++	struct ssam_request spec;
++	struct ssam_response rsp;
++	const void __user *plddata;
++	void __user *rspdata;
++	int status = 0, ret = 0, tmp;
++
++	r = (struct ssam_cdev_request __user *)arg;
++	ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
++	if (ret)
++		goto out;
++
++	plddata = u64_to_user_ptr(rqst.payload.data);
++	rspdata = u64_to_user_ptr(rqst.response.data);
++
++	/* Setup basic request fields. */
++	spec.target_category = rqst.target_category;
++	spec.target_id = rqst.target_id;
++	spec.command_id = rqst.command_id;
++	spec.instance_id = rqst.instance_id;
++	spec.flags = 0;
++	spec.length = rqst.payload.length;
++	spec.payload = NULL;
++
++	if (rqst.flags & SSAM_CDEV_REQUEST_HAS_RESPONSE)
++		spec.flags |= SSAM_REQUEST_HAS_RESPONSE;
++
++	if (rqst.flags & SSAM_CDEV_REQUEST_UNSEQUENCED)
++		spec.flags |= SSAM_REQUEST_UNSEQUENCED;
++
++	rsp.capacity = rqst.response.length;
++	rsp.length = 0;
++	rsp.pointer = NULL;
++
++	/* Get request payload from user-space. */
++	if (spec.length) {
++		if (!plddata) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		spec.payload = kzalloc(spec.length, GFP_KERNEL);
++		if (!spec.payload) {
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		if (copy_from_user((void *)spec.payload, plddata, spec.length)) {
++			ret = -EFAULT;
++			goto out;
++		}
++	}
++
++	/* Allocate response buffer. */
++	if (rsp.capacity) {
++		if (!rspdata) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
++		if (!rsp.pointer) {
++			ret = -ENOMEM;
++			goto out;
++		}
++	}
++
++	/* Perform request. */
++	status = ssam_request_sync(cdev->ctrl, &spec, &rsp);
++	if (status)
++		goto out;
++
++	/* Copy response to user-space. */
++	if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length))
++		ret = -EFAULT;
++
++out:
++	/* Always try to set response-length and status. */
++	tmp = put_user(rsp.length, &r->response.length);
++	if (tmp)
++		ret = tmp;
++
++	tmp = put_user(status, &r->status);
++	if (tmp)
++		ret = tmp;
++
++	/* Cleanup. */
++	kfree(spec.payload);
++	kfree(rsp.pointer);
++
++	return ret;
++}
++
++static long __ssam_cdev_device_ioctl(struct ssam_cdev *cdev, unsigned int cmd,
++				     unsigned long arg)
++{
++	switch (cmd) {
++	case SSAM_CDEV_REQUEST:
++		return ssam_cdev_request(cdev, arg);
++
++	default:
++		return -ENOTTY;
++	}
++}
++
++static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd,
++				   unsigned long arg)
++{
++	struct ssam_cdev *cdev = file->private_data;
++	long status;
++
++	/* Ensure that controller is valid for as long as we need it. */
++	if (down_read_killable(&cdev->lock))
++		return -ERESTARTSYS;
++
++	if (!cdev->ctrl) {
++		up_read(&cdev->lock);
++		return -ENODEV;
++	}
++
++	status = __ssam_cdev_device_ioctl(cdev, cmd, arg);
++
++	up_read(&cdev->lock);
++	return status;
++}
++
++static const struct file_operations ssam_controller_fops = {
++	.owner          = THIS_MODULE,
++	.open           = ssam_cdev_device_open,
++	.release        = ssam_cdev_device_release,
++	.unlocked_ioctl = ssam_cdev_device_ioctl,
++	.compat_ioctl   = ssam_cdev_device_ioctl,
++	.llseek         = noop_llseek,
++};
++
++static int ssam_dbg_device_probe(struct platform_device *pdev)
++{
++	struct ssam_controller *ctrl;
++	struct ssam_cdev *cdev;
++	int status;
++
++	ctrl = ssam_client_bind(&pdev->dev);
++	if (IS_ERR(ctrl))
++		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
++
++	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
++	if (!cdev)
++		return -ENOMEM;
++
++	kref_init(&cdev->kref);
++	init_rwsem(&cdev->lock);
++	cdev->ctrl = ctrl;
++
++	cdev->mdev.parent   = &pdev->dev;
++	cdev->mdev.minor    = MISC_DYNAMIC_MINOR;
++	cdev->mdev.name     = "surface_aggregator";
++	cdev->mdev.nodename = "surface/aggregator";
++	cdev->mdev.fops     = &ssam_controller_fops;
++
++	status = misc_register(&cdev->mdev);
++	if (status) {
++		kfree(cdev);
++		return status;
++	}
++
++	platform_set_drvdata(pdev, cdev);
++	return 0;
++}
++
++static int ssam_dbg_device_remove(struct platform_device *pdev)
++{
++	struct ssam_cdev *cdev = platform_get_drvdata(pdev);
++
++	misc_deregister(&cdev->mdev);
++
++	/*
++	 * The controller is only guaranteed to be valid for as long as the
++	 * driver is bound. Remove controller so that any lingering open files
++	 * cannot access it any more after we're gone.
++	 */
++	down_write(&cdev->lock);
++	cdev->ctrl = NULL;
++	up_write(&cdev->lock);
++
++	ssam_cdev_put(cdev);
++	return 0;
++}
++
++static struct platform_device *ssam_cdev_device;
++
++static struct platform_driver ssam_cdev_driver = {
++	.probe = ssam_dbg_device_probe,
++	.remove = ssam_dbg_device_remove,
++	.driver = {
++		.name = SSAM_CDEV_DEVICE_NAME,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++static int __init ssam_debug_init(void)
++{
++	int status;
++
++	ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME,
++						 PLATFORM_DEVID_NONE);
++	if (!ssam_cdev_device)
++		return -ENOMEM;
++
++	status = platform_device_add(ssam_cdev_device);
++	if (status)
++		goto err_device;
++
++	status = platform_driver_register(&ssam_cdev_driver);
++	if (status)
++		goto err_driver;
++
++	return 0;
++
++err_driver:
++	platform_device_del(ssam_cdev_device);
++err_device:
++	platform_device_put(ssam_cdev_device);
++	return status;
++}
++module_init(ssam_debug_init);
++
++static void __exit ssam_debug_exit(void)
++{
++	platform_driver_unregister(&ssam_cdev_driver);
++	platform_device_unregister(ssam_cdev_device);
++}
++module_exit(ssam_debug_exit);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h
+new file mode 100644
+index 000000000000..fbcce04abfe9
+--- /dev/null
++++ b/include/uapi/linux/surface_aggregator/cdev.h
+@@ -0,0 +1,78 @@
++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
++/*
++ * Surface System Aggregator Module (SSAM) user-space EC interface.
++ *
++ * Definitions, structs, and IOCTLs for the /dev/surface/aggregator misc
++ * device. This device provides direct user-space access to the SSAM EC.
++ * Intended for debugging and development.
++ *
++ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
++#define _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++/**
++ * enum ssam_cdev_request_flags - Request flags for SSAM cdev request IOCTL.
++ *
++ * @SSAM_CDEV_REQUEST_HAS_RESPONSE:
++ *	Specifies that the request expects a response. If not set, the request
++ *	will be directly completed after its underlying packet has been
++ *	transmitted. If set, the request transport system waits for a response
++ *	of the request.
++ *
++ * @SSAM_CDEV_REQUEST_UNSEQUENCED:
++ *	Specifies that the request should be transmitted via an unsequenced
++ *	packet. If set, the request must not have a response, meaning that this
++ *	flag and the %SSAM_CDEV_REQUEST_HAS_RESPONSE flag are mutually
++ *	exclusive.
++ */
++enum ssam_cdev_request_flags {
++	SSAM_CDEV_REQUEST_HAS_RESPONSE = 0x01,
++	SSAM_CDEV_REQUEST_UNSEQUENCED  = 0x02,
++};
++
++/**
++ * struct ssam_cdev_request - Controller request IOCTL argument.
++ * @target_category: Target category of the SAM request.
++ * @target_id:       Target ID of the SAM request.
++ * @command_id:      Command ID of the SAM request.
++ * @instance_id:     Instance ID of the SAM request.
++ * @flags:           Request flags (see &enum ssam_cdev_request_flags).
++ * @status:          Request status (output).
++ * @payload:         Request payload (input data).
++ * @payload.data:    Pointer to request payload data.
++ * @payload.length:  Length of request payload data (in bytes).
++ * @response:        Request response (output data).
++ * @response.data:   Pointer to response buffer.
++ * @response.length: On input: Capacity of response buffer (in bytes).
++ *                   On output: Length of request response (number of bytes
++ *                   in the buffer that are actually used).
++ */
++struct ssam_cdev_request {
++	__u8 target_category;
++	__u8 target_id;
++	__u8 command_id;
++	__u8 instance_id;
++	__u16 flags;
++	__s16 status;
++
++	struct {
++		__u64 data;
++		__u16 length;
++		__u8 __pad[6];
++	} payload;
++
++	struct {
++		__u64 data;
++		__u16 length;
++		__u8 __pad[6];
++	} response;
++} __attribute__((__packed__));
++
++#define SSAM_CDEV_REQUEST	_IOWR(0xA5, 1, struct ssam_cdev_request)
++
++#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */
+-- 
+2.30.1
+
+From b42609f185ad3142d58ef16df9a62b57fefeef96 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 21 Dec 2020 19:39:59 +0100
+Subject: [PATCH] platform/surface: Add Surface ACPI Notify driver
+
+The Surface ACPI Notify (SAN) device provides an ACPI interface to the
+Surface Aggregator EC, specifically the Surface Serial Hub interface.
+This interface allows EC requests to be made from ACPI code and can
+convert a subset of EC events back to ACPI notifications.
+
+Specifically, this interface provides a GenericSerialBus operation
+region. ACPI code can execute a request by writing the request command
+data and payload to this operation region and reading back the
+corresponding response via a write-then-read operation. Furthermore,
+this interface provides a _DSM method to be called when certain events
+from the EC have been received, essentially turning them into ACPI
+notifications.
+
+The driver provided in this commit essentially takes care of translating
+the request data written to the operation region, executing the request,
+waiting for it to finish, and finally writing and translating back the
+response (if the request has one). Furthermore, this driver takes care
+of enabling the events handled via ACPI _DSM calls. Lastly, this driver
+also exposes an interface providing discrete GPU (dGPU) power-on
+notifications on the Surface Book 2, which are also received via the
+operation region interface (but not handled by the SAN driver directly),
+making them accessible to other drivers (such as a dGPU hot-plug driver
+that may be added later on).
+
+On 5th and 6th generation Surface devices (Surface Pro 5/2017, Pro 6,
+Book 2, Laptop 1 and 2), the SAN interface provides full battery and
+thermal subsystem access, as well as other EC-based functionality. On
+those models, battery and thermal sensor devices are implemented as
+standard ACPI devices of that type; however, they forward ACPI calls to the
+corresponding Surface Aggregator EC requests via the SAN interface and
+receive corresponding notifications (e.g. battery information changes)
+from it. This interface is therefore required to provide said
+functionality on those devices.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20201221183959.1186143-10-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ .../surface_aggregator/clients/index.rst      |   1 +
+ .../surface_aggregator/clients/san.rst        |  44 +
+ MAINTAINERS                                   |   2 +
+ drivers/platform/surface/Kconfig              |  19 +
+ drivers/platform/surface/Makefile             |   1 +
+ .../platform/surface/surface_acpi_notify.c    | 886 ++++++++++++++++++
+ include/linux/surface_acpi_notify.h           |  39 +
+ 7 files changed, 992 insertions(+)
+ create mode 100644 Documentation/driver-api/surface_aggregator/clients/san.rst
+ create mode 100644 drivers/platform/surface/surface_acpi_notify.c
+ create mode 100644 include/linux/surface_acpi_notify.h
+
+diff --git a/Documentation/driver-api/surface_aggregator/clients/index.rst b/Documentation/driver-api/surface_aggregator/clients/index.rst
+index ab260ec82cfb..3ccabce23271 100644
+--- a/Documentation/driver-api/surface_aggregator/clients/index.rst
++++ b/Documentation/driver-api/surface_aggregator/clients/index.rst
+@@ -11,6 +11,7 @@ This is the documentation for client drivers themselves. Refer to
+    :maxdepth: 1
+ 
+    cdev
++   san
+ 
+ .. only::  subproject and html
+ 
+diff --git a/Documentation/driver-api/surface_aggregator/clients/san.rst b/Documentation/driver-api/surface_aggregator/clients/san.rst
+new file mode 100644
+index 000000000000..38c2580e7758
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/clients/san.rst
+@@ -0,0 +1,44 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |san_client_link| replace:: :c:func:`san_client_link`
++.. |san_dgpu_notifier_register| replace:: :c:func:`san_dgpu_notifier_register`
++.. |san_dgpu_notifier_unregister| replace:: :c:func:`san_dgpu_notifier_unregister`
++
++===================
++Surface ACPI Notify
++===================
++
++The Surface ACPI Notify (SAN) device provides the bridge between ACPI and
++the SAM controller. Specifically, ACPI code can execute requests and handle
++battery and thermal events via this interface. In addition to this, events
++relating to the discrete GPU (dGPU) of the Surface Book 2 can be sent from
++ACPI code (note: the Surface Book 3 uses a different method for this). The
++only currently known event sent via this interface is a dGPU power-on
++notification. While this driver handles the former part internally, it only
++relays the dGPU events via its public API to any other interested driver and
++does not handle them itself.
++
++The public interface of this driver is split into two parts: Client
++registration and notifier-block registration.
++
++A client to the SAN interface can be linked as consumer to the SAN device
++via |san_client_link|. This can be used to ensure that a client receiving
++dGPU events does not miss any events due to the SAN interface not being set
++up, as this forces the client driver to unbind once the SAN driver is
++unbound.
++
++Notifier-blocks can be registered by any device for as long as the module is
++loaded, regardless of being linked as client or not. Registration is done
++with |san_dgpu_notifier_register|. If the notifier is not needed any more, it
++should be unregistered via |san_dgpu_notifier_unregister|.
++
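++A minimal sketch of a hypothetical client driver hooking into this interface
++(the driver, its names, and the probe function are made up for illustration)
++could look like this:
++
++::
++
++    #include <linux/module.h>
++    #include <linux/notifier.h>
++    #include <linux/platform_device.h>
++    #include <linux/surface_acpi_notify.h>
++
++    /* Log dGPU events relayed by the SAN driver. */
++    static int my_dgpu_notify(struct notifier_block *nb, unsigned long action,
++                              void *data)
++    {
++            struct san_dgpu_event *evt = data;
++
++            /* The notifier action is the command ID of the dGPU event. */
++            pr_info("dGPU event: cid=%#04lx, iid=%#04x\n", action, evt->instance);
++            return NOTIFY_OK;
++    }
++
++    static struct notifier_block my_dgpu_nb = {
++            .notifier_call = my_dgpu_notify,
++    };
++
++    static int my_probe(struct platform_device *pdev)
++    {
++            int status;
++
++            /* Ensure the SAN interface is up and unbind together with it. */
++            status = san_client_link(&pdev->dev);
++            if (status)
++                    return status;
++
++            /* Unregister again in the corresponding remove() callback. */
++            return san_dgpu_notifier_register(&my_dgpu_nb);
++    }
++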
++Consult the API documentation below for more details.
++
++
++API Documentation
++=================
++
++.. kernel-doc:: include/linux/surface_acpi_notify.h
++
++.. kernel-doc:: drivers/platform/surface/surface_acpi_notify.c
++    :export:
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 5d15622ce47d..d5fe6fdb0341 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11814,7 +11814,9 @@ W:	https://github.com/linux-surface/surface-aggregator-module
+ C:	irc://chat.freenode.net/##linux-surface
+ F:	Documentation/driver-api/surface_aggregator/
+ F:	drivers/platform/surface/aggregator/
++F:	drivers/platform/surface/surface_acpi_notify.c
+ F:	drivers/platform/surface/surface_aggregator_cdev.c
++F:	include/linux/surface_acpi_notify.h
+ F:	include/linux/surface_aggregator/
+ F:	include/uapi/linux/surface_aggregator/
+ 
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index 82fbcfedc6dc..b0b91fa2f6a1 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -41,6 +41,25 @@ config SURFACE_3_POWER_OPREGION
+ 	  This driver provides support for ACPI operation
+ 	  region of the Surface 3 battery platform driver.
+ 
++config SURFACE_ACPI_NOTIFY
++	tristate "Surface ACPI Notify Driver"
++	depends on SURFACE_AGGREGATOR
++	help
++	  Surface ACPI Notify (SAN) driver for Microsoft Surface devices.
++
++	  This driver provides support for the ACPI interface (called SAN) of
++	  the Surface System Aggregator Module (SSAM) EC. This interface is used
++	  on 5th- and 6th-generation Microsoft Surface devices (including
++	  Surface Pro 5 and 6, Surface Book 2, Surface Laptops 1 and 2, and in
++	  reduced functionality on the Surface Laptop 3) to execute SSAM
++	  requests directly from ACPI code, as well as receive SSAM events and
++	  turn them into ACPI notifications. It essentially acts as a
++	  translation layer between the SSAM controller and ACPI.
++
++	  Specifically, this driver may be needed for battery status reporting,
++	  thermal sensor access, and real-time clock information, depending on
++	  the Surface device in question.
++
+ config SURFACE_AGGREGATOR_CDEV
+ 	tristate "Surface System Aggregator Module User-Space Interface"
+ 	depends on SURFACE_AGGREGATOR
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index 644c7511f64d..72f4d9fbb6be 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -7,6 +7,7 @@
+ obj-$(CONFIG_SURFACE3_WMI)		+= surface3-wmi.o
+ obj-$(CONFIG_SURFACE_3_BUTTON)		+= surface3_button.o
+ obj-$(CONFIG_SURFACE_3_POWER_OPREGION)	+= surface3_power.o
++obj-$(CONFIG_SURFACE_ACPI_NOTIFY)	+= surface_acpi_notify.o
+ obj-$(CONFIG_SURFACE_AGGREGATOR)	+= aggregator/
+ obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV)	+= surface_aggregator_cdev.o
+ obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
+diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
+new file mode 100644
+index 000000000000..8cd67a669c86
+--- /dev/null
++++ b/drivers/platform/surface/surface_acpi_notify.c
+@@ -0,0 +1,886 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Driver for the Surface ACPI Notify (SAN) interface/shim.
++ *
++ * Translates communication from ACPI to Surface System Aggregator Module
++ * (SSAM/SAM) requests and back, specifically SAM-over-SSH. Translates SSAM
++ * events back to ACPI notifications. Allows handling of discrete GPU
++ * notifications sent from ACPI via the SAN interface by providing them to any
++ * registered external driver.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/jiffies.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/notifier.h>
++#include <linux/platform_device.h>
++#include <linux/rwsem.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_acpi_notify.h>
++
++struct san_data {
++	struct device *dev;
++	struct ssam_controller *ctrl;
++
++	struct acpi_connection_info info;
++
++	struct ssam_event_notifier nf_bat;
++	struct ssam_event_notifier nf_tmp;
++};
++
++#define to_san_data(ptr, member) \
++	container_of(ptr, struct san_data, member)
++
++
++/* -- dGPU notifier interface. ---------------------------------------------- */
++
++struct san_rqsg_if {
++	struct rw_semaphore lock;
++	struct device *dev;
++	struct blocking_notifier_head nh;
++};
++
++static struct san_rqsg_if san_rqsg_if = {
++	.lock = __RWSEM_INITIALIZER(san_rqsg_if.lock),
++	.dev = NULL,
++	.nh = BLOCKING_NOTIFIER_INIT(san_rqsg_if.nh),
++};
++
++static int san_set_rqsg_interface_device(struct device *dev)
++{
++	int status = 0;
++
++	down_write(&san_rqsg_if.lock);
++	if (!san_rqsg_if.dev && dev)
++		san_rqsg_if.dev = dev;
++	else
++		status = -EBUSY;
++	up_write(&san_rqsg_if.lock);
++
++	return status;
++}
++
++/**
++ * san_client_link() - Link client as consumer to SAN device.
++ * @client: The client to link.
++ *
++ * Sets up a device link between the provided client device as consumer and
++ * the SAN device as provider. This function can be used to ensure that the
++ * SAN interface has been set up and will be set up for as long as the driver
++ * of the client device is bound. This guarantees that, during that time, all
++ * dGPU events will be received by any registered notifier.
++ *
++ * The link will be automatically removed once the client device's driver is
++ * unbound.
++ *
++ * Return: Returns zero on success, %-ENXIO if the SAN interface has not been
++ * set up yet, and %-ENOMEM if device link creation failed.
++ */
++int san_client_link(struct device *client)
++{
++	const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
++	struct device_link *link;
++
++	down_read(&san_rqsg_if.lock);
++
++	if (!san_rqsg_if.dev) {
++		up_read(&san_rqsg_if.lock);
++		return -ENXIO;
++	}
++
++	link = device_link_add(client, san_rqsg_if.dev, flags);
++	if (!link) {
++		up_read(&san_rqsg_if.lock);
++		return -ENOMEM;
++	}
++
++	if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
++		up_read(&san_rqsg_if.lock);
++		return -ENXIO;
++	}
++
++	up_read(&san_rqsg_if.lock);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(san_client_link);
++
++/**
++ * san_dgpu_notifier_register() - Register a SAN dGPU notifier.
++ * @nb: The notifier-block to register.
++ *
++ * Registers a SAN dGPU notifier, receiving any new SAN dGPU events sent from
++ * ACPI. The registered notifier will be called with &struct san_dgpu_event
++ * as notifier data and the command ID of that event as notifier action.
++ */
++int san_dgpu_notifier_register(struct notifier_block *nb)
++{
++	return blocking_notifier_chain_register(&san_rqsg_if.nh, nb);
++}
++EXPORT_SYMBOL_GPL(san_dgpu_notifier_register);
++
++/**
++ * san_dgpu_notifier_unregister() - Unregister a SAN dGPU notifier.
++ * @nb: The notifier-block to unregister.
++ */
++int san_dgpu_notifier_unregister(struct notifier_block *nb)
++{
++	return blocking_notifier_chain_unregister(&san_rqsg_if.nh, nb);
++}
++EXPORT_SYMBOL_GPL(san_dgpu_notifier_unregister);
++
++static int san_dgpu_notifier_call(struct san_dgpu_event *evt)
++{
++	int ret;
++
++	ret = blocking_notifier_call_chain(&san_rqsg_if.nh, evt->command, evt);
++	return notifier_to_errno(ret);
++}
++
++
++/* -- ACPI _DSM event relay. ------------------------------------------------ */
++
++#define SAN_DSM_REVISION	0
++
++/* 93b666c5-70c6-469f-a215-3d487c91ab3c */
++static const guid_t SAN_DSM_UUID =
++	GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
++		  0x48, 0x7c, 0x91, 0xab, 0x3c);
++
++enum san_dsm_event_fn {
++	SAN_DSM_EVENT_FN_BAT1_STAT = 0x03,
++	SAN_DSM_EVENT_FN_BAT1_INFO = 0x04,
++	SAN_DSM_EVENT_FN_ADP1_STAT = 0x05,
++	SAN_DSM_EVENT_FN_ADP1_INFO = 0x06,
++	SAN_DSM_EVENT_FN_BAT2_STAT = 0x07,
++	SAN_DSM_EVENT_FN_BAT2_INFO = 0x08,
++	SAN_DSM_EVENT_FN_THERMAL   = 0x09,
++	SAN_DSM_EVENT_FN_DPTF      = 0x0a,
++};
++
++enum sam_event_cid_bat {
++	SAM_EVENT_CID_BAT_BIX  = 0x15,
++	SAM_EVENT_CID_BAT_BST  = 0x16,
++	SAM_EVENT_CID_BAT_ADP  = 0x17,
++	SAM_EVENT_CID_BAT_PROT = 0x18,
++	SAM_EVENT_CID_BAT_DPTF = 0x4f,
++};
++
++enum sam_event_cid_tmp {
++	SAM_EVENT_CID_TMP_TRIP = 0x0b,
++};
++
++struct san_event_work {
++	struct delayed_work work;
++	struct device *dev;
++	struct ssam_event event;	/* must be last */
++};
++
++static int san_acpi_notify_event(struct device *dev, u64 func,
++				 union acpi_object *param)
++{
++	acpi_handle san = ACPI_HANDLE(dev);
++	union acpi_object *obj;
++	int status = 0;
++
++	if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, 1 << func))
++		return 0;
++
++	dev_dbg(dev, "notify event %#04llx\n", func);
++
++	obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
++				      func, param, ACPI_TYPE_BUFFER);
++	if (!obj)
++		return -EFAULT;
++
++	if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
++		dev_err(dev, "got unexpected result from _DSM\n");
++		status = -EPROTO;
++	}
++
++	ACPI_FREE(obj);
++	return status;
++}
++
++static int san_evt_bat_adp(struct device *dev, const struct ssam_event *event)
++{
++	int status;
++
++	status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_ADP1_STAT, NULL);
++	if (status)
++		return status;
++
++	/*
++	 * Ensure that the battery states get updated correctly. When the
++	 * battery is fully charged and an adapter is plugged in, it sometimes
++	 * is not updated correctly, instead showing it as charging.
++	 * Explicitly trigger battery updates to fix this.
++	 */
++
++	status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT1_STAT, NULL);
++	if (status)
++		return status;
++
++	return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT2_STAT, NULL);
++}
++
++static int san_evt_bat_bix(struct device *dev, const struct ssam_event *event)
++{
++	enum san_dsm_event_fn fn;
++
++	if (event->instance_id == 0x02)
++		fn = SAN_DSM_EVENT_FN_BAT2_INFO;
++	else
++		fn = SAN_DSM_EVENT_FN_BAT1_INFO;
++
++	return san_acpi_notify_event(dev, fn, NULL);
++}
++
++static int san_evt_bat_bst(struct device *dev, const struct ssam_event *event)
++{
++	enum san_dsm_event_fn fn;
++
++	if (event->instance_id == 0x02)
++		fn = SAN_DSM_EVENT_FN_BAT2_STAT;
++	else
++		fn = SAN_DSM_EVENT_FN_BAT1_STAT;
++
++	return san_acpi_notify_event(dev, fn, NULL);
++}
++
++static int san_evt_bat_dptf(struct device *dev, const struct ssam_event *event)
++{
++	union acpi_object payload;
++
++	/*
++	 * The Surface ACPI expects a buffer and not a package. It specifically
++	 * checks for ObjectType (Arg3) == 0x03. This will cause a warning in
++	 * acpica/nsarguments.c, but that warning can be safely ignored.
++	 */
++	payload.type = ACPI_TYPE_BUFFER;
++	payload.buffer.length = event->length;
++	payload.buffer.pointer = (u8 *)&event->data[0];
++
++	return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_DPTF, &payload);
++}
++
++static unsigned long san_evt_bat_delay(u8 cid)
++{
++	switch (cid) {
++	case SAM_EVENT_CID_BAT_ADP:
++		/*
++		 * Wait for battery state to update before signaling adapter
++		 * change.
++		 */
++		return msecs_to_jiffies(5000);
++
++	case SAM_EVENT_CID_BAT_BST:
++		/* Ensure we do not miss anything important due to caching. */
++		return msecs_to_jiffies(2000);
++
++	default:
++		return 0;
++	}
++}
++
++static bool san_evt_bat(const struct ssam_event *event, struct device *dev)
++{
++	int status;
++
++	switch (event->command_id) {
++	case SAM_EVENT_CID_BAT_BIX:
++		status = san_evt_bat_bix(dev, event);
++		break;
++
++	case SAM_EVENT_CID_BAT_BST:
++		status = san_evt_bat_bst(dev, event);
++		break;
++
++	case SAM_EVENT_CID_BAT_ADP:
++		status = san_evt_bat_adp(dev, event);
++		break;
++
++	case SAM_EVENT_CID_BAT_PROT:
++		/*
++		 * TODO: Implement support for battery protection status change
++		 *       event.
++		 */
++		return true;
++
++	case SAM_EVENT_CID_BAT_DPTF:
++		status = san_evt_bat_dptf(dev, event);
++		break;
++
++	default:
++		return false;
++	}
++
++	if (status) {
++		dev_err(dev, "error handling power event (cid = %#04x)\n",
++			event->command_id);
++	}
++
++	return true;
++}
++
++static void san_evt_bat_workfn(struct work_struct *work)
++{
++	struct san_event_work *ev;
++
++	ev = container_of(work, struct san_event_work, work.work);
++	san_evt_bat(&ev->event, ev->dev);
++	kfree(ev);
++}
++
++static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
++			  const struct ssam_event *event)
++{
++	struct san_data *d = to_san_data(nf, nf_bat);
++	struct san_event_work *work;
++	unsigned long delay = san_evt_bat_delay(event->command_id);
++
++	if (delay == 0)
++		return san_evt_bat(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
++
++	work = kzalloc(sizeof(*work) + event->length, GFP_KERNEL);
++	if (!work)
++		return ssam_notifier_from_errno(-ENOMEM);
++
++	INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
++	work->dev = d->dev;
++
++	memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
++
++	schedule_delayed_work(&work->work, delay);
++	return SSAM_NOTIF_HANDLED;
++}
++
++static int san_evt_tmp_trip(struct device *dev, const struct ssam_event *event)
++{
++	union acpi_object param;
++
++	/*
++	 * The Surface ACPI expects an integer and not a package. This will
++	 * cause a warning in acpica/nsarguments.c, but that warning can be
++	 * safely ignored.
++	 */
++	param.type = ACPI_TYPE_INTEGER;
++	param.integer.value = event->instance_id;
++
++	return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_THERMAL, &param);
++}
++
++static bool san_evt_tmp(const struct ssam_event *event, struct device *dev)
++{
++	int status;
++
++	switch (event->command_id) {
++	case SAM_EVENT_CID_TMP_TRIP:
++		status = san_evt_tmp_trip(dev, event);
++		break;
++
++	default:
++		return false;
++	}
++
++	if (status) {
++		dev_err(dev, "error handling thermal event (cid = %#04x)\n",
++			event->command_id);
++	}
++
++	return true;
++}
++
++static u32 san_evt_tmp_nf(struct ssam_event_notifier *nf,
++			  const struct ssam_event *event)
++{
++	struct san_data *d = to_san_data(nf, nf_tmp);
++
++	return san_evt_tmp(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
++}
++
++
++/* -- ACPI GSB OperationRegion handler -------------------------------------- */
++
++struct gsb_data_in {
++	u8 cv;
++} __packed;
++
++struct gsb_data_rqsx {
++	u8 cv;				/* Command value (san_gsb_request_cv). */
++	u8 tc;				/* Target category. */
++	u8 tid;				/* Target ID. */
++	u8 iid;				/* Instance ID. */
++	u8 snc;				/* Expect-response-flag. */
++	u8 cid;				/* Command ID. */
++	u16 cdl;			/* Payload length. */
++	u8 pld[];			/* Payload. */
++} __packed;
++
++struct gsb_data_etwl {
++	u8 cv;				/* Command value (should be 0x02). */
++	u8 etw3;			/* Unknown. */
++	u8 etw4;			/* Unknown. */
++	u8 msg[];			/* Error message (ASCIIZ). */
++} __packed;
++
++struct gsb_data_out {
++	u8 status;			/* _SSH communication status. */
++	u8 len;				/* _SSH payload length. */
++	u8 pld[];			/* _SSH payload. */
++} __packed;
++
++union gsb_buffer_data {
++	struct gsb_data_in   in;	/* Common input. */
++	struct gsb_data_rqsx rqsx;	/* RQSX input. */
++	struct gsb_data_etwl etwl;	/* ETWL input. */
++	struct gsb_data_out  out;	/* Output. */
++};
++
++struct gsb_buffer {
++	u8 status;			/* GSB AttribRawProcess status. */
++	u8 len;				/* GSB AttribRawProcess length. */
++	union gsb_buffer_data data;
++} __packed;
++
++#define SAN_GSB_MAX_RQSX_PAYLOAD  (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
++#define SAN_GSB_MAX_RESPONSE	  (U8_MAX - 2 - sizeof(struct gsb_data_out))
++
++#define SAN_GSB_COMMAND		0
++
++enum san_gsb_request_cv {
++	SAN_GSB_REQUEST_CV_RQST = 0x01,
++	SAN_GSB_REQUEST_CV_ETWL = 0x02,
++	SAN_GSB_REQUEST_CV_RQSG = 0x03,
++};
++
++#define SAN_REQUEST_NUM_TRIES	5
++
++static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *b)
++{
++	struct gsb_data_etwl *etwl = &b->data.etwl;
++
++	if (b->len < sizeof(struct gsb_data_etwl)) {
++		dev_err(d->dev, "invalid ETWL package (len = %d)\n", b->len);
++		return AE_OK;
++	}
++
++	dev_err(d->dev, "ETWL(%#04x, %#04x): %.*s\n", etwl->etw3, etwl->etw4,
++		(unsigned int)(b->len - sizeof(struct gsb_data_etwl)),
++		(char *)etwl->msg);
++
++	/* Indicate success. */
++	b->status = 0x00;
++	b->len = 0x00;
++
++	return AE_OK;
++}
++
++static
++struct gsb_data_rqsx *san_validate_rqsx(struct device *dev, const char *type,
++					struct gsb_buffer *b)
++{
++	struct gsb_data_rqsx *rqsx = &b->data.rqsx;
++
++	if (b->len < sizeof(struct gsb_data_rqsx)) {
++		dev_err(dev, "invalid %s package (len = %d)\n", type, b->len);
++		return NULL;
++	}
++
++	if (get_unaligned(&rqsx->cdl) != b->len - sizeof(struct gsb_data_rqsx)) {
++		dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
++			type, b->len, get_unaligned(&rqsx->cdl));
++		return NULL;
++	}
++
++	if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
++		dev_err(dev, "payload for %s package too large (cdl = %d)\n",
++			type, get_unaligned(&rqsx->cdl));
++		return NULL;
++	}
++
++	return rqsx;
++}
++
++static void gsb_rqsx_response_error(struct gsb_buffer *gsb, int status)
++{
++	gsb->status = 0x00;
++	gsb->len = 0x02;
++	gsb->data.out.status = (u8)(-status);
++	gsb->data.out.len = 0x00;
++}
++
++static void gsb_rqsx_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
++{
++	gsb->status = 0x00;
++	gsb->len = len + 2;
++	gsb->data.out.status = 0x00;
++	gsb->data.out.len = len;
++
++	if (len)
++		memcpy(&gsb->data.out.pld[0], ptr, len);
++}
++
++static acpi_status san_rqst_fixup_suspended(struct san_data *d,
++					    struct ssam_request *rqst,
++					    struct gsb_buffer *gsb)
++{
++	if (rqst->target_category == SSAM_SSH_TC_BAS && rqst->command_id == 0x0D) {
++		u8 base_state = 1;
++
++		/* Base state quirk:
++		 * The base state may be queried from ACPI when the EC is still
++		 * suspended. In this case it will return '-EPERM'. This query
++		 * will only be triggered from the ACPI lid GPE interrupt, thus
++		 * we are either in laptop or studio mode (base status 0x01 or
++		 * 0x02). Furthermore, we will only get here if the device (and
++		 * EC) have been suspended.
++		 *
++		 * We now assume that the device is in laptop mode (0x01). This
++		 * has the drawback that it will wake the device when unfolding
++		 * it in studio mode, but it also allows us to avoid actively
++		 * waiting for the EC to wake up, which may incur a notable
++		 * delay.
++		 */
++
++		dev_dbg(d->dev, "rqst: fixup: base-state quirk\n");
++
++		gsb_rqsx_response_success(gsb, &base_state, sizeof(base_state));
++		return AE_OK;
++	}
++
++	gsb_rqsx_response_error(gsb, -ENXIO);
++	return AE_OK;
++}
++
++static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
++{
++	u8 rspbuf[SAN_GSB_MAX_RESPONSE];
++	struct gsb_data_rqsx *gsb_rqst;
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++	int status = 0;
++
++	gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
++	if (!gsb_rqst)
++		return AE_OK;
++
++	rqst.target_category = gsb_rqst->tc;
++	rqst.target_id = gsb_rqst->tid;
++	rqst.command_id = gsb_rqst->cid;
++	rqst.instance_id = gsb_rqst->iid;
++	rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
++	rqst.length = get_unaligned(&gsb_rqst->cdl);
++	rqst.payload = &gsb_rqst->pld[0];
++
++	rsp.capacity = ARRAY_SIZE(rspbuf);
++	rsp.length = 0;
++	rsp.pointer = &rspbuf[0];
++
++	/* Handle suspended device. */
++	if (d->dev->power.is_suspended) {
++		dev_warn(d->dev, "rqst: device is suspended, not executing\n");
++		return san_rqst_fixup_suspended(d, &rqst, buffer);
++	}
++
++	status = __ssam_retry(ssam_request_sync_onstack, SAN_REQUEST_NUM_TRIES,
++			      d->ctrl, &rqst, &rsp, SAN_GSB_MAX_RQSX_PAYLOAD);
++
++	if (!status) {
++		gsb_rqsx_response_success(buffer, rsp.pointer, rsp.length);
++	} else {
++		dev_err(d->dev, "rqst: failed with error %d\n", status);
++		gsb_rqsx_response_error(buffer, status);
++	}
++
++	return AE_OK;
++}
++
++static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
++{
++	struct gsb_data_rqsx *gsb_rqsg;
++	struct san_dgpu_event evt;
++	int status;
++
++	gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
++	if (!gsb_rqsg)
++		return AE_OK;
++
++	evt.category = gsb_rqsg->tc;
++	evt.target = gsb_rqsg->tid;
++	evt.command = gsb_rqsg->cid;
++	evt.instance = gsb_rqsg->iid;
++	evt.length = get_unaligned(&gsb_rqsg->cdl);
++	evt.payload = &gsb_rqsg->pld[0];
++
++	status = san_dgpu_notifier_call(&evt);
++	if (!status) {
++		gsb_rqsx_response_success(buffer, NULL, 0);
++	} else {
++		dev_err(d->dev, "rqsg: failed with error %d\n", status);
++		gsb_rqsx_response_error(buffer, status);
++	}
++
++	return AE_OK;
++}
++
++static acpi_status san_opreg_handler(u32 function, acpi_physical_address command,
++				     u32 bits, u64 *value64, void *opreg_context,
++				     void *region_context)
++{
++	struct san_data *d = to_san_data(opreg_context, info);
++	struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
++	int accessor_type = (function & 0xFFFF0000) >> 16;
++
++	if (command != SAN_GSB_COMMAND) {
++		dev_warn(d->dev, "unsupported command: %#04llx\n", command);
++		return AE_OK;
++	}
++
++	if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
++		dev_err(d->dev, "invalid access type: %#04x\n", accessor_type);
++		return AE_OK;
++	}
++
++		/* Buffer must at least contain the command-value. */
++	if (buffer->len == 0) {
++		dev_err(d->dev, "request-package too small\n");
++		return AE_OK;
++	}
++
++	switch (buffer->data.in.cv) {
++	case SAN_GSB_REQUEST_CV_RQST:
++		return san_rqst(d, buffer);
++
++	case SAN_GSB_REQUEST_CV_ETWL:
++		return san_etwl(d, buffer);
++
++	case SAN_GSB_REQUEST_CV_RQSG:
++		return san_rqsg(d, buffer);
++
++	default:
++		dev_warn(d->dev, "unsupported SAN0 request (cv: %#04x)\n",
++			 buffer->data.in.cv);
++		return AE_OK;
++	}
++}
++
++
++/* -- Driver setup. --------------------------------------------------------- */
++
++static int san_events_register(struct platform_device *pdev)
++{
++	struct san_data *d = platform_get_drvdata(pdev);
++	int status;
++
++	d->nf_bat.base.priority = 1;
++	d->nf_bat.base.fn = san_evt_bat_nf;
++	d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
++	d->nf_bat.event.id.instance = 0;
++	d->nf_bat.event.mask = SSAM_EVENT_MASK_TARGET;
++	d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
++
++	d->nf_tmp.base.priority = 1;
++	d->nf_tmp.base.fn = san_evt_tmp_nf;
++	d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
++	d->nf_tmp.event.id.instance = 0;
++	d->nf_tmp.event.mask = SSAM_EVENT_MASK_TARGET;
++	d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
++
++	status = ssam_notifier_register(d->ctrl, &d->nf_bat);
++	if (status)
++		return status;
++
++	status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
++	if (status)
++		ssam_notifier_unregister(d->ctrl, &d->nf_bat);
++
++	return status;
++}
++
++static void san_events_unregister(struct platform_device *pdev)
++{
++	struct san_data *d = platform_get_drvdata(pdev);
++
++	ssam_notifier_unregister(d->ctrl, &d->nf_bat);
++	ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
++}
++
++#define san_consumer_printk(level, dev, handle, fmt, ...)			\
++do {										\
++	char *path = "<error getting consumer path>";				\
++	struct acpi_buffer buffer = {						\
++		.length = ACPI_ALLOCATE_BUFFER,					\
++		.pointer = NULL,						\
++	};									\
++										\
++	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))	\
++		path = buffer.pointer;						\
++										\
++	dev_##level(dev, "[%s]: " fmt, path, ##__VA_ARGS__);			\
++	kfree(buffer.pointer);							\
++} while (0)
++
++#define san_consumer_dbg(dev, handle, fmt, ...) \
++	san_consumer_printk(dbg, dev, handle, fmt, ##__VA_ARGS__)
++
++#define san_consumer_warn(dev, handle, fmt, ...) \
++	san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__)
++
++static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle)
++{
++	struct acpi_handle_list dep_devices;
++	acpi_handle supplier = ACPI_HANDLE(&pdev->dev);
++	acpi_status status;
++	int i;
++
++	if (!acpi_has_method(handle, "_DEP"))
++		return false;
++
++	status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
++	if (ACPI_FAILURE(status)) {
++		san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n");
++		return false;
++	}
++
++	for (i = 0; i < dep_devices.count; i++) {
++		if (dep_devices.handles[i] == supplier)
++			return true;
++	}
++
++	return false;
++}
++
++static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
++				      void *context, void **rv)
++{
++	const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER;
++	struct platform_device *pdev = context;
++	struct acpi_device *adev;
++	struct device_link *link;
++
++	if (!is_san_consumer(pdev, handle))
++		return AE_OK;
++
++	/* Ignore ACPI devices that are not present. */
++	if (acpi_bus_get_device(handle, &adev) != 0)
++		return AE_OK;
++
++	san_consumer_dbg(&pdev->dev, handle, "creating device link\n");
++
++	/* Try to set up device links, ignore but log errors. */
++	link = device_link_add(&adev->dev, &pdev->dev, flags);
++	if (!link) {
++		san_consumer_warn(&pdev->dev, handle, "failed to create device link\n");
++		return AE_OK;
++	}
++
++	return AE_OK;
++}
++
++static int san_consumer_links_setup(struct platform_device *pdev)
++{
++	acpi_status status;
++
++	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
++				     ACPI_UINT32_MAX, san_consumer_setup, NULL,
++				     pdev, NULL);
++
++	return status ? -EFAULT : 0;
++}
++
++static int san_probe(struct platform_device *pdev)
++{
++	acpi_handle san = ACPI_HANDLE(&pdev->dev);
++	struct ssam_controller *ctrl;
++	struct san_data *data;
++	acpi_status astatus;
++	int status;
++
++	ctrl = ssam_client_bind(&pdev->dev);
++	if (IS_ERR(ctrl))
++		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
++
++	status = san_consumer_links_setup(pdev);
++	if (status)
++		return status;
++
++	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	data->dev = &pdev->dev;
++	data->ctrl = ctrl;
++
++	platform_set_drvdata(pdev, data);
++
++	astatus = acpi_install_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
++						     &san_opreg_handler, NULL,
++						     &data->info);
++	if (ACPI_FAILURE(astatus))
++		return -ENXIO;
++
++	status = san_events_register(pdev);
++	if (status)
++		goto err_enable_events;
++
++	status = san_set_rqsg_interface_device(&pdev->dev);
++	if (status)
++		goto err_install_dev;
++
++	acpi_walk_dep_device_list(san);
++	return 0;
++
++err_install_dev:
++	san_events_unregister(pdev);
++err_enable_events:
++	acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
++					  &san_opreg_handler);
++	return status;
++}
++
++static int san_remove(struct platform_device *pdev)
++{
++	acpi_handle san = ACPI_HANDLE(&pdev->dev);
++
++	san_set_rqsg_interface_device(NULL);
++	acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
++					  &san_opreg_handler);
++	san_events_unregister(pdev);
++
++	/*
++	 * We have unregistered our event sources. Now we need to ensure that
++	 * all delayed works they may have spawned are run to completion.
++	 */
++	flush_scheduled_work();
++
++	return 0;
++}
++
++static const struct acpi_device_id san_match[] = {
++	{ "MSHW0091" },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, san_match);
++
++static struct platform_driver surface_acpi_notify = {
++	.probe = san_probe,
++	.remove = san_remove,
++	.driver = {
++		.name = "surface_acpi_notify",
++		.acpi_match_table = san_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(surface_acpi_notify);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/include/linux/surface_acpi_notify.h b/include/linux/surface_acpi_notify.h
+new file mode 100644
+index 000000000000..8e3e86c7d78c
+--- /dev/null
++++ b/include/linux/surface_acpi_notify.h
+@@ -0,0 +1,39 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Interface for Surface ACPI Notify (SAN) driver.
++ *
++ * Provides access to discrete GPU notifications sent from ACPI via the SAN
++ * driver, which are not handled by this driver directly.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _LINUX_SURFACE_ACPI_NOTIFY_H
++#define _LINUX_SURFACE_ACPI_NOTIFY_H
++
++#include <linux/notifier.h>
++#include <linux/types.h>
++
++/**
++ * struct san_dgpu_event - Discrete GPU ACPI event.
++ * @category: Category of the event.
++ * @target:   Target ID of the event source.
++ * @command:  Command ID of the event.
++ * @instance: Instance ID of the event source.
++ * @length:   Length of the event's payload data (in bytes).
++ * @payload:  Pointer to the event's payload data.
++ */
++struct san_dgpu_event {
++	u8 category;
++	u8 target;
++	u8 command;
++	u8 instance;
++	u16 length;
++	u8 *payload;
++};
++
++int san_client_link(struct device *client);
++int san_dgpu_notifier_register(struct notifier_block *nb);
++int san_dgpu_notifier_unregister(struct notifier_block *nb);
++
++#endif /* _LINUX_SURFACE_ACPI_NOTIFY_H */
+-- 
+2.30.1
+
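The header above exposes san_dgpu_notifier_register()/san_dgpu_notifier_unregister() so that a discrete-GPU driver can listen for the RQSG events forwarded by san_rqsg(). A minimal consumer sketch follows (hypothetical names; it assumes the SAN driver passes the struct san_dgpu_event as the notifier's data argument):

    #include <linux/notifier.h>
    #include <linux/printk.h>
    #include <linux/surface_acpi_notify.h>

    /* Hypothetical consumer callback; 'data' is assumed to point to the
     * struct san_dgpu_event forwarded by the SAN driver. */
    static int example_dgpu_notify(struct notifier_block *nb,
                                   unsigned long action, void *data)
    {
            struct san_dgpu_event *evt = data;

            pr_debug("dGPU event: tc=%#x, cid=%#x, len=%u\n",
                     evt->category, evt->command, evt->length);
            return NOTIFY_OK;
    }

    static struct notifier_block example_dgpu_nb = {
            .notifier_call = example_dgpu_notify,
    };

    /* In the consumer's probe(): san_dgpu_notifier_register(&example_dgpu_nb);
     * in its remove():           san_dgpu_notifier_unregister(&example_dgpu_nb); */
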
+From c4b99c73cfee54e7cf59c1cf45b4aee78fce2c81 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Mon, 11 Jan 2021 14:46:48 +0000
+Subject: [PATCH] platform/surface: fix potential integer overflow on shift of
+ an int
+
+The left shift of the 32 bit integer constant 1 is evaluated using 32 bit
+arithmetic and then passed as a 64 bit function argument. In the case where
+func is 32 or more this can lead to an overflow. Avoid this by shifting
+using the BIT_ULL macro instead.
+
+Addresses-Coverity: ("Unintentional integer overflow")
+Fixes: fc00bc8ac1da ("platform/surface: Add Surface ACPI Notify driver")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Reviewed-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20210111144648.20498-1-colin.king@canonical.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/surface_acpi_notify.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
+index 8cd67a669c86..ef9c1f8e8336 100644
+--- a/drivers/platform/surface/surface_acpi_notify.c
++++ b/drivers/platform/surface/surface_acpi_notify.c
+@@ -188,7 +188,7 @@ static int san_acpi_notify_event(struct device *dev, u64 func,
+ 	union acpi_object *obj;
+ 	int status = 0;
+ 
+-	if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, 1 << func))
++	if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, BIT_ULL(func)))
+ 		return 0;
+ 
+ 	dev_dbg(dev, "notify event %#04llx\n", func);
+-- 
+2.30.1
+
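A reduced illustration of the pattern fixed above (not part of the patch): building a _DSM function mask with a plain int shift misbehaves once the function index reaches 32, while BIT_ULL() performs the shift in 64-bit arithmetic.

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Sketch only: returns the single-bit mask for a _DSM function index. */
    static u64 dsm_func_mask(u64 func)
    {
            return BIT_ULL(func);   /* 1ULL << func: well-defined for func < 64 */
            /* return 1 << func;    -- 32-bit shift, undefined for func >= 32   */
    }
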
+From dcda1b902b58d38b65fb1ed95d1657839e657d73 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 11 Jan 2021 16:48:50 +0100
+Subject: [PATCH] platform/surface: aggregator_cdev: Fix access of
+ uninitialized variables
+
+When copy_struct_from_user() in ssam_cdev_request() fails, we directly
+jump to the 'out' label. In this case, however, 'spec' and 'rsp' are not
+initialized, but we still access fields of those variables. Fix this by
+initializing them at the time of their declaration.
+
+Reported-by: Colin Ian King <colin.king@canonical.com>
+Fixes: 178f6ab77e61 ("platform/surface: Add Surface Aggregator user-space interface")
+Addresses-Coverity: ("Uninitialized pointer read")
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20210111154851.325404-2-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/surface_aggregator_cdev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
+index 340d15b148b9..979340cdd9de 100644
+--- a/drivers/platform/surface/surface_aggregator_cdev.c
++++ b/drivers/platform/surface/surface_aggregator_cdev.c
+@@ -66,8 +66,8 @@ static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
+ {
+ 	struct ssam_cdev_request __user *r;
+ 	struct ssam_cdev_request rqst;
+-	struct ssam_request spec;
+-	struct ssam_response rsp;
++	struct ssam_request spec = {};
++	struct ssam_response rsp = {};
+ 	const void __user *plddata;
+ 	void __user *rspdata;
+ 	int status = 0, ret = 0, tmp;
+-- 
+2.30.1
+
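Condensed to its essence, the fixed pattern looks roughly as follows (a sketch, not the full ioctl handler): zero-initializing the structs at declaration makes the early 'goto out' path safe, because kfree(NULL) is a no-op.

    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/surface_aggregator/controller.h>
    #include <uapi/linux/surface_aggregator/cdev.h>

    static long example_request(struct ssam_cdev_request __user *r)
    {
            struct ssam_cdev_request rqst;
            struct ssam_request spec = {};   /* zero-init: .payload == NULL */
            struct ssam_response rsp = {};   /* zero-init: .pointer == NULL */
            long ret;

            ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
            if (ret)
                    goto out;                /* taken before spec/rsp are filled in */

            /* ... set up spec/rsp and perform the request ... */
    out:
            kfree(spec.payload);             /* safe: kfree(NULL) is a no-op */
            kfree(rsp.pointer);
            return ret;
    }
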
+From 023b9d531d3e26d351d0aaed8c785fb2a3434f49 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 11 Jan 2021 16:48:51 +0100
+Subject: [PATCH] platform/surface: aggregator_cdev: Add comments regarding
+ unchecked allocation size
+
+CI static analysis complains about the allocation size in payload and
+response buffers being unchecked. In general, these allocations should
+be safe as the user-input is u16 and thus limited to U16_MAX, which is
+only slightly larger than the theoretical maximum imposed by the
+underlying SSH protocol.
+
+All bounds on these values required by the underlying protocol are
+enforced in ssam_request_sync() (or rather the functions called by it),
+thus bounds here are only relevant for allocation.
+
+Add comments explaining that this should be safe.
+
+Reported-by: Colin Ian King <colin.king@canonical.com>
+Fixes: 178f6ab77e61 ("platform/surface: Add Surface Aggregator user-space interface")
+Addresses-Coverity: ("Untrusted allocation size")
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20210111154851.325404-3-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ .../surface/surface_aggregator_cdev.c         | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
+index 979340cdd9de..79e28fab7e40 100644
+--- a/drivers/platform/surface/surface_aggregator_cdev.c
++++ b/drivers/platform/surface/surface_aggregator_cdev.c
+@@ -106,6 +106,15 @@ static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
+ 			goto out;
+ 		}
+ 
++		/*
++		 * Note: spec.length is limited to U16_MAX bytes via struct
++		 * ssam_cdev_request. This is slightly larger than the
++		 * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
++		 * underlying protocol (note that nothing remotely this size
++		 * should ever be allocated in any normal case). This size is
++		 * validated later in ssam_request_sync(), for allocation the
++		 * bound imposed by u16 should be enough.
++		 */
+ 		spec.payload = kzalloc(spec.length, GFP_KERNEL);
+ 		if (!spec.payload) {
+ 			ret = -ENOMEM;
+@@ -125,6 +134,16 @@ static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
+ 			goto out;
+ 		}
+ 
++		/*
++		 * Note: rsp.capacity is limited to U16_MAX bytes via struct
++		 * ssam_cdev_request. This is slightly larger than the
++		 * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
++		 * underlying protocol (note that nothing remotely this size
++		 * should ever be allocated in any normal case). In later use,
++		 * this capacity does not have to be strictly bounded, as it
++		 * is only used as an output buffer to be written to. For
++		 * allocation the bound imposed by u16 should be enough.
++		 */
+ 		rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
+ 		if (!rsp.pointer) {
+ 			ret = -ENOMEM;
+-- 
+2.30.1
+
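The bound relied on by the comments above can be illustrated independently of the exact UAPI layout (sketch with an assumed helper name): a length originating from a 16-bit field can never request more than U16_MAX bytes, so the allocation is bounded by construction, and the tighter protocol limit is enforced later by ssam_request_sync().

    #include <linux/slab.h>
    #include <linux/types.h>

    /* Sketch: 'len' comes from a __u16 user-space field, so the allocation
     * size is capped at U16_MAX (65535) bytes regardless of user input. */
    static void *example_alloc_payload(u16 len)
    {
            return kzalloc(len, GFP_KERNEL);
    }
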
+From 794b4e9ca8d93da88444b48aa486e923f7c1adb4 Mon Sep 17 00:00:00 2001
+From: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Date: Thu, 14 Jan 2021 09:04:52 +0100
+Subject: [PATCH] platform/surface: aggregator: fix a kernel-doc markup
+
+A function has a different name between its prototype
+and its kernel-doc markup:
+
+	../drivers/platform/surface/aggregator/ssh_request_layer.c:1065: warning: expecting prototype for ssh_rtl_tx_start(). Prototype was for ssh_rtl_start() instead
+
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Reviewed-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/4a6bf33cfbd06654d78294127f2b6d354d073089.1610610937.git.mchehab+huawei@kernel.org
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/aggregator/ssh_request_layer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
+index bb1c862411a2..25db4d638cfa 100644
+--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
+@@ -1056,7 +1056,7 @@ void ssh_rtl_destroy(struct ssh_rtl *rtl)
+ }
+ 
+ /**
+- * ssh_rtl_tx_start() - Start request transmitter and receiver.
++ * ssh_rtl_start() - Start request transmitter and receiver.
+  * @rtl: The request transport layer.
+  *
+  * Return: Returns zero on success, a negative error code on failure.
+-- 
+2.30.1
+
+From 475b311fa4c01f4391de90730daf0d1e718f5ada Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Thu, 14 Jan 2021 16:08:26 +0100
+Subject: [PATCH] platform/surface: aggregator: Fix kernel-doc references
+
+Both the ssh_rtl_rx_start() and ssh_rtl_tx_start() functions do not exist
+and have been consolidated into ssh_rtl_start(). Nevertheless,
+kernel-doc references the former functions. Replace those references
+with references to ssh_rtl_start().
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20210114150826.19109-1-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/aggregator/ssh_request_layer.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
+index 25db4d638cfa..52a83a8fcf82 100644
+--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
+@@ -1004,9 +1004,8 @@ int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
+  *
+  * Initializes the given request transport layer and associated packet
+  * transport layer. Transmitter and receiver threads must be started
+- * separately via ssh_rtl_tx_start() and ssh_rtl_rx_start(), after the
+- * request-layer has been initialized and the lower-level serial device layer
+- * has been set up.
++ * separately via ssh_rtl_start(), after the request-layer has been
++ * initialized and the lower-level serial device layer has been set up.
+  *
+  * Return: Returns zero on success and a nonzero error code on failure.
+  */
+-- 
+2.30.1
+
+From 9f0cf2d2b5da3a6c3bb040794d295ee99a4ad5b2 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Tue, 26 Jan 2021 18:22:02 +0100
+Subject: [PATCH] platform/surface: aggregator: Fix braces in if condition with
+ unlikely() macro
+
+The braces of the unlikely() macro inside the if condition only cover
+the subtraction part, not the whole statement. This causes the result of
+the subtraction to be converted to zero or one. While that still works
+in this context, it causes static analysis tools to complain (and is
+just plain wrong).
+
+Fix the bracket placement and, while at it, simplify the if-condition.
+Also add a comment to the if-condition explaining what we expect the
+result to be and what happens on the failure path, as it seems to have
+caused a bit of confusion.
+
+This commit should not cause any difference in behavior or generated
+code.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: c167b9c7e3d6 ("platform/surface: Add Surface Aggregator subsystem")
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20210126172202.1428367-1-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ .../surface/aggregator/ssh_packet_layer.c     | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+index 74f0faaa2b27..583315db8b02 100644
+--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+@@ -1694,7 +1694,24 @@ static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
+ 	/* Find SYN. */
+ 	syn_found = sshp_find_syn(source, &aligned);
+ 
+-	if (unlikely(aligned.ptr - source->ptr) > 0) {
++	if (unlikely(aligned.ptr != source->ptr)) {
++		/*
++		 * We expect aligned.ptr == source->ptr. If this is not the
++		 * case, then aligned.ptr > source->ptr and we've encountered
++		 * some unexpected data where we'd expect the start of a new
++		 * message (i.e. the SYN sequence).
++		 *
++		 * This can happen when a CRC check for the previous message
++		 * failed and we start actively searching for the next one
++		 * (via the call to sshp_find_syn() above), or the first bytes
++		 * of a message got dropped or corrupted.
++		 *
++		 * In any case, we issue a warning, send a NAK to the EC to
++		 * request re-transmission of any data we haven't acknowledged
++		 * yet, and finally, skip everything up to the next SYN
++		 * sequence.
++		 */
++
+ 		ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
+ 
+ 		/*
+-- 
+2.30.1
+
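Reduced to two self-contained helpers (illustrative names only): unlikely(x) expands to __builtin_expect(!!(x), 0), so its result is the 0/1 truth value of x rather than x itself, which is why the original parenthesis placement compared the wrong thing.

    #include <linux/compiler.h>
    #include <linux/types.h>

    static bool misplaced_parenthesis(const u8 *start, const u8 *syn)
    {
            return unlikely(syn - start) > 0;   /* compares a 0/1 value against 0 */
    }

    static bool intended_check(const u8 *start, const u8 *syn)
    {
            return unlikely(syn != start);      /* branch hint covers the whole test */
    }
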
+From c81c70309a61976d024c2cb898bc16fd19686d63 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Thu, 11 Feb 2021 13:41:49 +0100
+Subject: [PATCH] platform/surface: aggregator: Fix access of unaligned value
+
+The raw message frame length is unaligned and explicitly marked as
+little endian. It should not be accessed without the appropriate
+accessor functions. Fix this.
+
+Note that payload.len already contains the correct length after parsing
+via sshp_parse_frame(), so we can simply use that instead.
+
+Reported-by: kernel-test-robot <lkp@intel.com>
+Fixes: c167b9c7e3d6 ("platform/surface: Add Surface Aggregator subsystem")
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Acked-by: Mark Gross <mgross@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20210211124149.2439007-1-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-sam
+---
+ drivers/platform/surface/aggregator/ssh_packet_layer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+index 583315db8b02..15d96eac6811 100644
+--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
+@@ -1774,7 +1774,7 @@ static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
+ 		break;
+ 	}
+ 
+-	return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(frame->len);
++	return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(payload.len);
+ }
+ 
+ static int ssh_ptl_rx_threadfn(void *data)
+-- 
+2.30.1
+
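For context, the accessor pattern this fix relies on, shown on a simplified packed struct (illustrative layout, not the real struct ssh_frame): an unaligned little-endian field should be read through get_unaligned_le16() instead of being dereferenced directly.

    #include <asm/unaligned.h>
    #include <linux/types.h>

    struct example_frame {
            u8     type;
            __le16 len;          /* unaligned, little-endian on the wire */
    } __packed;

    static u16 example_frame_len(const struct example_frame *frame)
    {
            return get_unaligned_le16(&frame->len);
            /* not: le16_to_cpu(frame->len) -- direct deref of an unaligned field */
    }
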
+From 245b63acc338f0d5998234bab3c5a0039fe8de85 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 7 Feb 2021 03:42:45 +0100
+Subject: [PATCH] platform/surface: Set up Surface Aggregator device registry
+
+The Surface System Aggregator Module (SSAM) subsystem provides various
+functionalities, which are separated by spreading them across multiple
+devices and corresponding drivers. Parts of that functionality / some of
+those devices, however, can (as far as we currently know) not be
+auto-detected by conventional means. While older (specifically 5th- and
+6th-)generation models do advertise most of their functionality via
+standard platform devices in ACPI, newer generations do not.
+
+As we are currently also not aware of any feasible way to query said
+functionalities dynamically, this poses a problem. There is, however, a
+device in ACPI that seems to be used by Windows for identifying
+different Surface models: The Windows Surface Integration Device (WSID).
+This device seems to have a HID corresponding to the overall set of
+functionalities SSAM provides for the associated model.
+
+This commit introduces a registry providing non-detectable device
+information via software nodes. In addition, a SSAM platform hub driver
+is introduced, which takes care of creating and managing the SSAM
+devices specified in this registry. This approach allows for a
+hierarchical setup akin to ACPI and is easily extendable, e.g. via
+firmware node properties.
+
+Note that this commit only provides the basis for the platform hub and
+registry, and does not add any content to it. The registry will be
+expanded in subsequent commits.
+
+Patchset: surface-sam
+---
+ MAINTAINERS                                   |   1 +
+ drivers/platform/surface/Kconfig              |  27 ++
+ drivers/platform/surface/Makefile             |   1 +
+ .../surface/surface_aggregator_registry.c     | 284 ++++++++++++++++++
+ 4 files changed, 313 insertions(+)
+ create mode 100644 drivers/platform/surface/surface_aggregator_registry.c
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index d5fe6fdb0341..48e54650e28e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11816,6 +11816,7 @@ F:	Documentation/driver-api/surface_aggregator/
+ F:	drivers/platform/surface/aggregator/
+ F:	drivers/platform/surface/surface_acpi_notify.c
+ F:	drivers/platform/surface/surface_aggregator_cdev.c
++F:	drivers/platform/surface/surface_aggregator_registry.c
+ F:	include/linux/surface_acpi_notify.h
+ F:	include/linux/surface_aggregator/
+ F:	include/uapi/linux/surface_aggregator/
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index b0b91fa2f6a1..97e08dd35992 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -77,6 +77,33 @@ config SURFACE_AGGREGATOR_CDEV
+ 	  The provided interface is intended for debugging and development only,
+ 	  and should not be used otherwise.
+ 
++config SURFACE_AGGREGATOR_REGISTRY
++	tristate "Surface System Aggregator Module Device Registry"
++	depends on SURFACE_AGGREGATOR
++	depends on SURFACE_AGGREGATOR_BUS
++	help
++	  Device-registry and device-hubs for Surface System Aggregator Module
++	  (SSAM) devices.
++
++	  Provides a module and driver which act as a device-registry for SSAM
++	  client devices that cannot be detected automatically, e.g. via ACPI.
++	  Such devices are instead provided via this registry and attached via
++	  device hubs, also provided in this module.
++
++	  Devices provided via this registry are:
++	  - Platform profile (performance-/cooling-mode) device (5th- and later
++	    generations).
++	  - Battery/AC devices (7th-generation).
++	  - HID input devices (7th-generation).
++
++	  Select M (recommended) or Y here if you want support for the above
++	  mentioned devices on the corresponding Surface models. Without this
++	  module, the respective devices will not be instantiated and thus any
++	  functionality provided by them will be missing, even when drivers for
++	  these devices are present. In other words, this module only provides
++	  the respective client devices. Drivers for these devices still need to
++	  be selected via the other options.
++
+ config SURFACE_BOOK1_DGPU_SWITCH
+ 	tristate "Surface Book 1 dGPU Switch Driver"
+ 	depends on SYSFS
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index 72f4d9fbb6be..30a212aefd35 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -10,6 +10,7 @@ obj-$(CONFIG_SURFACE_3_POWER_OPREGION)	+= surface3_power.o
+ obj-$(CONFIG_SURFACE_ACPI_NOTIFY)	+= surface_acpi_notify.o
+ obj-$(CONFIG_SURFACE_AGGREGATOR)	+= aggregator/
+ obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV)	+= surface_aggregator_cdev.o
++obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+ obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
+ obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+new file mode 100644
+index 000000000000..a051d941ad96
+--- /dev/null
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -0,0 +1,284 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface System Aggregator Module (SSAM) client device registry.
++ *
++ * Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that
++ * cannot be auto-detected. Provides device-hubs and performs instantiation
++ * for these devices.
++ *
++ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/property.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
++
++
++/* -- Device registry. ------------------------------------------------------ */
++
++/*
++ * SSAM device names follow the SSAM module alias, meaning they are prefixed
++ * with 'ssam:', followed by domain, category, target ID, instance ID, and
++ * function, each encoded as two-digit hexadecimal, separated by ':'. In other
++ * words, it follows the scheme
++ *
++ *      ssam:dd:cc:tt:ii:ff
++ *
++ * Where, 'dd', 'cc', 'tt', 'ii', and 'ff' are the two-digit hexadecimal
++ * values mentioned above, respectively.
++ */
++
++/* Root node. */
++static const struct software_node ssam_node_root = {
++	.name = "ssam_platform_hub",
++};
++
++/* Devices for Surface Book 2. */
++static const struct software_node *ssam_node_group_sb2[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++/* Devices for Surface Book 3. */
++static const struct software_node *ssam_node_group_sb3[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++/* Devices for Surface Laptop 1. */
++static const struct software_node *ssam_node_group_sl1[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++/* Devices for Surface Laptop 2. */
++static const struct software_node *ssam_node_group_sl2[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++/* Devices for Surface Laptop 3. */
++static const struct software_node *ssam_node_group_sl3[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++/* Devices for Surface Laptop Go. */
++static const struct software_node *ssam_node_group_slg1[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++/* Devices for Surface Pro 5. */
++static const struct software_node *ssam_node_group_sp5[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++/* Devices for Surface Pro 6. */
++static const struct software_node *ssam_node_group_sp6[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++/* Devices for Surface Pro 7. */
++static const struct software_node *ssam_node_group_sp7[] = {
++	&ssam_node_root,
++	NULL,
++};
++
++
++/* -- Device registry helper functions. ------------------------------------- */
++
++static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
++{
++	u8 d, tc, tid, iid, fn;
++	int n;
++
++	n = sscanf(str, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
++	if (n != 5)
++		return -EINVAL;
++
++	uid->domain = d;
++	uid->category = tc;
++	uid->target = tid;
++	uid->instance = iid;
++	uid->function = fn;
++
++	return 0;
++}
++
++static int ssam_hub_remove_devices_fn(struct device *dev, void *data)
++{
++	if (!is_ssam_device(dev))
++		return 0;
++
++	ssam_device_remove(to_ssam_device(dev));
++	return 0;
++}
++
++static void ssam_hub_remove_devices(struct device *parent)
++{
++	device_for_each_child_reverse(parent, NULL, ssam_hub_remove_devices_fn);
++}
++
++static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ctrl,
++			       struct fwnode_handle *node)
++{
++	struct ssam_device_uid uid;
++	struct ssam_device *sdev;
++	int status;
++
++	status = ssam_uid_from_string(fwnode_get_name(node), &uid);
++	if (status)
++		return status;
++
++	sdev = ssam_device_alloc(ctrl, uid);
++	if (!sdev)
++		return -ENOMEM;
++
++	sdev->dev.parent = parent;
++	sdev->dev.fwnode = node;
++
++	status = ssam_device_add(sdev);
++	if (status)
++		ssam_device_put(sdev);
++
++	return status;
++}
++
++static int ssam_hub_add_devices(struct device *parent, struct ssam_controller *ctrl,
++				struct fwnode_handle *node)
++{
++	struct fwnode_handle *child;
++	int status;
++
++	fwnode_for_each_child_node(node, child) {
++		/*
++		 * Try to add the device specified in the firmware node. If
++		 * this fails with -EINVAL, the node does not specify any SSAM
++		 * device, so ignore it and continue with the next one.
++		 */
++
++		status = ssam_hub_add_device(parent, ctrl, child);
++		if (status && status != -EINVAL)
++			goto err;
++	}
++
++	return 0;
++err:
++	ssam_hub_remove_devices(parent);
++	return status;
++}
++
++
++/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
++
++static const struct acpi_device_id ssam_platform_hub_match[] = {
++	/* Surface Pro 4, 5, and 6 (OMBR < 0x10) */
++	{ "MSHW0081", (unsigned long)ssam_node_group_sp5 },
++
++	/* Surface Pro 6 (OMBR >= 0x10) */
++	{ "MSHW0111", (unsigned long)ssam_node_group_sp6 },
++
++	/* Surface Pro 7 */
++	{ "MSHW0116", (unsigned long)ssam_node_group_sp7 },
++
++	/* Surface Book 2 */
++	{ "MSHW0107", (unsigned long)ssam_node_group_sb2 },
++
++	/* Surface Book 3 */
++	{ "MSHW0117", (unsigned long)ssam_node_group_sb3 },
++
++	/* Surface Laptop 1 */
++	{ "MSHW0086", (unsigned long)ssam_node_group_sl1 },
++
++	/* Surface Laptop 2 */
++	{ "MSHW0112", (unsigned long)ssam_node_group_sl2 },
++
++	/* Surface Laptop 3 (13", Intel) */
++	{ "MSHW0114", (unsigned long)ssam_node_group_sl3 },
++
++	/* Surface Laptop 3 (15", AMD) */
++	{ "MSHW0110", (unsigned long)ssam_node_group_sl3 },
++
++	/* Surface Laptop Go 1 */
++	{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
++
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
++
++static int ssam_platform_hub_probe(struct platform_device *pdev)
++{
++	const struct software_node **nodes;
++	struct ssam_controller *ctrl;
++	struct fwnode_handle *root;
++	int status;
++
++	nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
++	if (!nodes)
++		return -ENODEV;
++
++	/*
++	 * As we're adding the SSAM client devices as children under this device
++	 * and not the SSAM controller, we need to add a device link to the
++	 * controller to ensure that we remove all of our devices before the
++	 * controller is removed. This also guarantees proper ordering for
++	 * suspend/resume of the devices on this hub.
++	 */
++	ctrl = ssam_client_bind(&pdev->dev);
++	if (IS_ERR(ctrl))
++		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
++
++	status = software_node_register_node_group(nodes);
++	if (status)
++		return status;
++
++	root = software_node_fwnode(&ssam_node_root);
++	if (!root) {
++		software_node_unregister_node_group(nodes);
++		return -ENOENT;
++	}
++
++	set_secondary_fwnode(&pdev->dev, root);
++
++	status = ssam_hub_add_devices(&pdev->dev, ctrl, root);
++	if (status) {
++		set_secondary_fwnode(&pdev->dev, NULL);
++		software_node_unregister_node_group(nodes);
++	}
++
++	platform_set_drvdata(pdev, nodes);
++	return status;
++}
++
++static int ssam_platform_hub_remove(struct platform_device *pdev)
++{
++	const struct software_node **nodes = platform_get_drvdata(pdev);
++
++	ssam_hub_remove_devices(&pdev->dev);
++	set_secondary_fwnode(&pdev->dev, NULL);
++	software_node_unregister_node_group(nodes);
++	return 0;
++}
++
++static struct platform_driver ssam_platform_hub_driver = {
++	.probe = ssam_platform_hub_probe,
++	.remove = ssam_platform_hub_remove,
++	.driver = {
++		.name = "surface_aggregator_platform_hub",
++		.acpi_match_table = ssam_platform_hub_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(ssam_platform_hub_driver);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+-- 
+2.30.1
+
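As a worked example of the naming scheme documented in the registry above: the node name "ssam:01:02:01:01:01" (the AC-adapter node added later in this series) parses to domain 0x01, category 0x02 (the battery subsystem), target 0x01, instance 0x01, function 0x01. A standalone sketch of that parse, mirroring ssam_uid_from_string() (struct and function names here are illustrative):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    struct example_uid {
            u8 domain, category, target, instance, function;
    };

    static int example_uid_parse(const char *name, struct example_uid *uid)
    {
            int n = sscanf(name, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx",
                           &uid->domain, &uid->category, &uid->target,
                           &uid->instance, &uid->function);

            return n == 5 ? 0 : -EINVAL;
    }
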
+From 7ffa29669f90c35561bd0b9131b60f6ad56c7981 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 7 Feb 2021 04:14:35 +0100
+Subject: [PATCH] platform/surface: aggregator_registry: Add base device hub
+
+The Surface Book 3 has a detachable base part. While the top part
+(so-called clipboard) contains the CPU, touchscreen, and primary
+battery, the base contains, among other things, a keyboard, touchpad,
+and secondary battery.
+
+Those devices do not react well to being accessed when the base part is
+detached and should thus be removed and added in sync with the base. To
+facilitate this, we introduce a virtual base device hub, which
+automatically removes or adds the devices registered under it.
+
+Patchset: surface-sam
+---
+ .../surface/surface_aggregator_registry.c     | 261 +++++++++++++++++-
+ 1 file changed, 260 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index a051d941ad96..6c23d75a044c 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -11,9 +11,12 @@
+ 
+ #include <linux/acpi.h>
+ #include <linux/kernel.h>
++#include <linux/limits.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/platform_device.h>
+ #include <linux/property.h>
++#include <linux/types.h>
+ 
+ #include <linux/surface_aggregator/controller.h>
+ #include <linux/surface_aggregator/device.h>
+@@ -38,6 +41,12 @@ static const struct software_node ssam_node_root = {
+ 	.name = "ssam_platform_hub",
+ };
+ 
++/* Base device hub (devices attached to Surface Book 3 base). */
++static const struct software_node ssam_node_hub_base = {
++	.name = "ssam:00:00:02:00:00",
++	.parent = &ssam_node_root,
++};
++
+ /* Devices for Surface Book 2. */
+ static const struct software_node *ssam_node_group_sb2[] = {
+ 	&ssam_node_root,
+@@ -47,6 +56,7 @@ static const struct software_node *ssam_node_group_sb2[] = {
+ /* Devices for Surface Book 3. */
+ static const struct software_node *ssam_node_group_sb3[] = {
+ 	&ssam_node_root,
++	&ssam_node_hub_base,
+ 	NULL,
+ };
+ 
+@@ -177,6 +187,230 @@ static int ssam_hub_add_devices(struct device *parent, struct ssam_controller *c
+ }
+ 
+ 
++/* -- SSAM base-hub driver. ------------------------------------------------- */
++
++enum ssam_base_hub_state {
++	SSAM_BASE_HUB_UNINITIALIZED,
++	SSAM_BASE_HUB_CONNECTED,
++	SSAM_BASE_HUB_DISCONNECTED,
++};
++
++struct ssam_base_hub {
++	struct ssam_device *sdev;
++
++	struct mutex lock;  /* Guards state update checks and transitions. */
++	enum ssam_base_hub_state state;
++
++	struct ssam_event_notifier notif;
++};
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0d,
++	.instance_id     = 0x00,
++});
++
++#define SSAM_BAS_OPMODE_TABLET		0x00
++#define SSAM_EVENT_BAS_CID_CONNECTION	0x0c
++
++static int ssam_base_hub_query_state(struct ssam_base_hub *hub, enum ssam_base_hub_state *state)
++{
++	u8 opmode;
++	int status;
++
++	status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
++	if (status < 0) {
++		dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
++		return status;
++	}
++
++	if (opmode != SSAM_BAS_OPMODE_TABLET)
++		*state = SSAM_BASE_HUB_CONNECTED;
++	else
++		*state = SSAM_BASE_HUB_DISCONNECTED;
++
++	return 0;
++}
++
++static ssize_t ssam_base_hub_state_show(struct device *dev, struct device_attribute *attr,
++					char *buf)
++{
++	struct ssam_base_hub *hub = dev_get_drvdata(dev);
++	bool connected;
++
++	mutex_lock(&hub->lock);
++	connected = hub->state == SSAM_BASE_HUB_CONNECTED;
++	mutex_unlock(&hub->lock);
++
++	return sysfs_emit(buf, "%d\n", connected);
++}
++
++static struct device_attribute ssam_base_hub_attr_state =
++	__ATTR(state, 0444, ssam_base_hub_state_show, NULL);
++
++static struct attribute *ssam_base_hub_attrs[] = {
++	&ssam_base_hub_attr_state.attr,
++	NULL,
++};
++
++const struct attribute_group ssam_base_hub_group = {
++	.attrs = ssam_base_hub_attrs,
++};
++
++static int __ssam_base_hub_update(struct ssam_base_hub *hub, enum ssam_base_hub_state new)
++{
++	struct fwnode_handle *node = dev_fwnode(&hub->sdev->dev);
++	int status = 0;
++
++	lockdep_assert_held(&hub->lock);
++
++	if (hub->state == new)
++		return 0;
++	hub->state = new;
++
++	if (hub->state == SSAM_BASE_HUB_CONNECTED)
++		status = ssam_hub_add_devices(&hub->sdev->dev, hub->sdev->ctrl, node);
++	else
++		ssam_hub_remove_devices(&hub->sdev->dev);
++
++	if (status)
++		dev_err(&hub->sdev->dev, "failed to update base-hub devices: %d\n", status);
++
++	return status;
++}
++
++static int ssam_base_hub_update(struct ssam_base_hub *hub)
++{
++	enum ssam_base_hub_state state;
++	int status;
++
++	mutex_lock(&hub->lock);
++
++	status = ssam_base_hub_query_state(hub, &state);
++	if (!status)
++		status = __ssam_base_hub_update(hub, state);
++
++	mutex_unlock(&hub->lock);
++	return status;
++}
++
++static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
++{
++	struct ssam_base_hub *hub;
++	struct ssam_device *sdev;
++	enum ssam_base_hub_state new;
++
++	hub = container_of(nf, struct ssam_base_hub, notif);
++	sdev = hub->sdev;
++
++	if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
++		return 0;
++
++	if (event->length < 1) {
++		dev_err(&sdev->dev, "unexpected payload size: %u\n",
++			event->length);
++		return 0;
++	}
++
++	if (event->data[0])
++		new = SSAM_BASE_HUB_CONNECTED;
++	else
++		new = SSAM_BASE_HUB_DISCONNECTED;
++
++	mutex_lock(&hub->lock);
++	__ssam_base_hub_update(hub, new);
++	mutex_unlock(&hub->lock);
++
++	/*
++	 * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
++	 * consumed by the detachment system driver. We're just a (more or less)
++	 * silent observer.
++	 */
++	return 0;
++}
++
++static int __maybe_unused ssam_base_hub_resume(struct device *dev)
++{
++	return ssam_base_hub_update(dev_get_drvdata(dev));
++}
++static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
++
++static int ssam_base_hub_probe(struct ssam_device *sdev)
++{
++	struct ssam_base_hub *hub;
++	int status;
++
++	hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
++	if (!hub)
++		return -ENOMEM;
++
++	mutex_init(&hub->lock);
++
++	hub->sdev = sdev;
++	hub->state = SSAM_BASE_HUB_UNINITIALIZED;
++
++	hub->notif.base.priority = INT_MAX;  /* This notifier should run first. */
++	hub->notif.base.fn = ssam_base_hub_notif;
++	hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	hub->notif.event.id.target_category = SSAM_SSH_TC_BAS,
++	hub->notif.event.id.instance = 0,
++	hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	ssam_device_set_drvdata(sdev, hub);
++
++	status = ssam_notifier_register(sdev->ctrl, &hub->notif);
++	if (status)
++		goto err_register;
++
++	status = ssam_base_hub_update(hub);
++	if (status)
++		goto err_update;
++
++	status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
++	if (status)
++		goto err_update;
++
++	return 0;
++
++err_update:
++	ssam_notifier_unregister(sdev->ctrl, &hub->notif);
++	ssam_hub_remove_devices(&sdev->dev);
++err_register:
++	mutex_destroy(&hub->lock);
++	return status;
++}
++
++static void ssam_base_hub_remove(struct ssam_device *sdev)
++{
++	struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
++
++	sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
++
++	ssam_notifier_unregister(sdev->ctrl, &hub->notif);
++	ssam_hub_remove_devices(&sdev->dev);
++
++	mutex_destroy(&hub->lock);
++}
++
++static const struct ssam_device_id ssam_base_hub_match[] = {
++	{ SSAM_VDEV(HUB, 0x02, SSAM_ANY_IID, 0x00) },
++	{ },
++};
++
++static struct ssam_device_driver ssam_base_hub_driver = {
++	.probe = ssam_base_hub_probe,
++	.remove = ssam_base_hub_remove,
++	.match_table = ssam_base_hub_match,
++	.driver = {
++		.name = "surface_aggregator_base_hub",
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++		.pm = &ssam_base_hub_pm_ops,
++	},
++};
++
++
+ /* -- SSAM platform/meta-hub driver. ---------------------------------------- */
+ 
+ static const struct acpi_device_id ssam_platform_hub_match[] = {
+@@ -277,7 +511,32 @@ static struct platform_driver ssam_platform_hub_driver = {
+ 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ 	},
+ };
+-module_platform_driver(ssam_platform_hub_driver);
++
++
++/* -- Module initialization. ------------------------------------------------ */
++
++static int __init ssam_device_hub_init(void)
++{
++	int status;
++
++	status = platform_driver_register(&ssam_platform_hub_driver);
++	if (status)
++		return status;
++
++	status = ssam_device_driver_register(&ssam_base_hub_driver);
++	if (status)
++		platform_driver_unregister(&ssam_platform_hub_driver);
++
++	return status;
++}
++module_init(ssam_device_hub_init);
++
++static void __exit ssam_device_hub_exit(void)
++{
++	ssam_device_driver_unregister(&ssam_base_hub_driver);
++	platform_driver_unregister(&ssam_platform_hub_driver);
++}
++module_exit(ssam_device_hub_exit);
+ 
+ MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+ MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
+-- 
+2.30.1
+
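The hub's connection state is exported through the 'state' attribute created above. A minimal user-space reader might look like the following sketch; the attribute sits in the hub device's sysfs directory on the surface_aggregator bus, and since the exact device name depends on the hub's UID, the path is taken as a command-line argument rather than hard-coded:

    #include <stdio.h>

    int main(int argc, char **argv)
    {
            FILE *f;
            int connected = 0;

            if (argc != 2)
                    return 1;

            f = fopen(argv[1], "r");
            if (!f)
                    return 1;

            if (fscanf(f, "%d", &connected) == 1)
                    printf("base %s\n", connected ? "connected" : "detached");

            fclose(f);
            return 0;
    }
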
+From 3ef39d45e162a91ef20d41e4584f22213e838ac7 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 7 Feb 2021 04:55:09 +0100
+Subject: [PATCH] platform/surface: aggregator_registry: Add battery subsystem
+ devices
+
+Add battery subsystem (TC=0x02) devices (battery and AC) to the SSAM
+device registry. These devices need to be registered for 7th-generation
+Surface models. On 5th- and 6th-generation models, these devices are
+handled via the standard ACPI battery/AC interface, which in turn
+accesses the same SSAM interface via the Surface ACPI Notify (SAN)
+driver.
+
+Patchset: surface-sam
+---
+ .../surface/surface_aggregator_registry.c     | 27 +++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index 6c23d75a044c..cde279692842 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -47,6 +47,24 @@ static const struct software_node ssam_node_hub_base = {
+ 	.parent = &ssam_node_root,
+ };
+ 
++/* AC adapter. */
++static const struct software_node ssam_node_bat_ac = {
++	.name = "ssam:01:02:01:01:01",
++	.parent = &ssam_node_root,
++};
++
++/* Primary battery. */
++static const struct software_node ssam_node_bat_main = {
++	.name = "ssam:01:02:01:01:00",
++	.parent = &ssam_node_root,
++};
++
++/* Secondary battery (Surface Book 3). */
++static const struct software_node ssam_node_bat_sb3base = {
++	.name = "ssam:01:02:02:01:00",
++	.parent = &ssam_node_hub_base,
++};
++
+ /* Devices for Surface Book 2. */
+ static const struct software_node *ssam_node_group_sb2[] = {
+ 	&ssam_node_root,
+@@ -57,6 +75,9 @@ static const struct software_node *ssam_node_group_sb2[] = {
+ static const struct software_node *ssam_node_group_sb3[] = {
+ 	&ssam_node_root,
+ 	&ssam_node_hub_base,
++	&ssam_node_bat_ac,
++	&ssam_node_bat_main,
++	&ssam_node_bat_sb3base,
+ 	NULL,
+ };
+ 
+@@ -75,12 +96,16 @@ static const struct software_node *ssam_node_group_sl2[] = {
+ /* Devices for Surface Laptop 3. */
+ static const struct software_node *ssam_node_group_sl3[] = {
+ 	&ssam_node_root,
++	&ssam_node_bat_ac,
++	&ssam_node_bat_main,
+ 	NULL,
+ };
+ 
+ /* Devices for Surface Laptop Go. */
+ static const struct software_node *ssam_node_group_slg1[] = {
+ 	&ssam_node_root,
++	&ssam_node_bat_ac,
++	&ssam_node_bat_main,
+ 	NULL,
+ };
+ 
+@@ -99,6 +124,8 @@ static const struct software_node *ssam_node_group_sp6[] = {
+ /* Devices for Surface Pro 7. */
+ static const struct software_node *ssam_node_group_sp7[] = {
+ 	&ssam_node_root,
++	&ssam_node_bat_ac,
++	&ssam_node_bat_main,
+ 	NULL,
+ };
+ 
+-- 
+2.30.1
+
+From 7e71870e30d8a2c0539f3c3a97b77d7dfcbcbfa4 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 7 Feb 2021 05:01:08 +0100
+Subject: [PATCH] platform/surface: aggregator_registry: Add platform profile
+ device
+
+Add the SSAM platform profile device to the SSAM device registry. This
+device is accessible under the thermal subsystem (TC=0x03) and needs to
+be registered for all Surface models.
+
+Patchset: surface-sam
+---
+ .../surface/surface_aggregator_registry.c         | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index cde279692842..33904613dd4b 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -65,9 +65,16 @@ static const struct software_node ssam_node_bat_sb3base = {
+ 	.parent = &ssam_node_hub_base,
+ };
+ 
++/* Platform profile / performance-mode device. */
++static const struct software_node ssam_node_tmp_pprof = {
++	.name = "ssam:01:03:01:00:01",
++	.parent = &ssam_node_root,
++};
++
+ /* Devices for Surface Book 2. */
+ static const struct software_node *ssam_node_group_sb2[] = {
+ 	&ssam_node_root,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+@@ -78,18 +85,21 @@ static const struct software_node *ssam_node_group_sb3[] = {
+ 	&ssam_node_bat_ac,
+ 	&ssam_node_bat_main,
+ 	&ssam_node_bat_sb3base,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+ /* Devices for Surface Laptop 1. */
+ static const struct software_node *ssam_node_group_sl1[] = {
+ 	&ssam_node_root,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+ /* Devices for Surface Laptop 2. */
+ static const struct software_node *ssam_node_group_sl2[] = {
+ 	&ssam_node_root,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+@@ -98,6 +108,7 @@ static const struct software_node *ssam_node_group_sl3[] = {
+ 	&ssam_node_root,
+ 	&ssam_node_bat_ac,
+ 	&ssam_node_bat_main,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+@@ -106,18 +117,21 @@ static const struct software_node *ssam_node_group_slg1[] = {
+ 	&ssam_node_root,
+ 	&ssam_node_bat_ac,
+ 	&ssam_node_bat_main,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+ /* Devices for Surface Pro 5. */
+ static const struct software_node *ssam_node_group_sp5[] = {
+ 	&ssam_node_root,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+ /* Devices for Surface Pro 6. */
+ static const struct software_node *ssam_node_group_sp6[] = {
+ 	&ssam_node_root,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+@@ -126,6 +140,7 @@ static const struct software_node *ssam_node_group_sp7[] = {
+ 	&ssam_node_root,
+ 	&ssam_node_bat_ac,
+ 	&ssam_node_bat_main,
++	&ssam_node_tmp_pprof,
+ 	NULL,
+ };
+ 
+-- 
+2.30.1
+
+From 813f8ad53117549bfc1dce60b8700cecb76dff89 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 7 Feb 2021 05:06:41 +0100
+Subject: [PATCH] platform/surface: aggregator_registry: Add DTX device
+
+Add the detachment system (DTX) SSAM device for the Surface Book 3. This
+device is accessible under the base (TC=0x11) subsystem.
+
+Patchset: surface-sam
+---
+ drivers/platform/surface/surface_aggregator_registry.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index 33904613dd4b..dc044d06828b 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -71,6 +71,12 @@ static const struct software_node ssam_node_tmp_pprof = {
+ 	.parent = &ssam_node_root,
+ };
+ 
++/* DTX / detachment-system device (Surface Book 3). */
++static const struct software_node ssam_node_bas_dtx = {
++	.name = "ssam:01:11:01:00:00",
++	.parent = &ssam_node_root,
++};
++
+ /* Devices for Surface Book 2. */
+ static const struct software_node *ssam_node_group_sb2[] = {
+ 	&ssam_node_root,
+@@ -86,6 +92,7 @@ static const struct software_node *ssam_node_group_sb3[] = {
+ 	&ssam_node_bat_main,
+ 	&ssam_node_bat_sb3base,
+ 	&ssam_node_tmp_pprof,
++	&ssam_node_bas_dtx,
+ 	NULL,
+ };
+ 
+-- 
+2.30.1
+
+From 603eba200fad0b5480c15ca0463166f6aee93264 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 7 Feb 2021 05:16:44 +0100
+Subject: [PATCH] platform/surface: aggregator_registry: Add HID subsystem
+ devices
+
+Add HID subsystem (TC=0x15) devices. These devices need to be registered
+for 7th-generation Surface models. On previous generations, these
+devices are either provided as platform devices via ACPI (Surface Laptop
+1 and 2) or implemented as standard USB devices.
+
+Patchset: surface-sam
+---
+ .../surface/surface_aggregator_registry.c     | 49 +++++++++++++++++++
+ 1 file changed, 49 insertions(+)
+
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index dc044d06828b..caee90d135c5 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -77,6 +77,48 @@ static const struct software_node ssam_node_bas_dtx = {
+ 	.parent = &ssam_node_root,
+ };
+ 
++/* HID keyboard. */
++static const struct software_node ssam_node_hid_main_keyboard = {
++	.name = "ssam:01:15:02:01:00",
++	.parent = &ssam_node_root,
++};
++
++/* HID touchpad. */
++static const struct software_node ssam_node_hid_main_touchpad = {
++	.name = "ssam:01:15:02:03:00",
++	.parent = &ssam_node_root,
++};
++
++/* HID device instance 5 (unknown HID device). */
++static const struct software_node ssam_node_hid_main_iid5 = {
++	.name = "ssam:01:15:02:05:00",
++	.parent = &ssam_node_root,
++};
++
++/* HID keyboard (base hub). */
++static const struct software_node ssam_node_hid_base_keyboard = {
++	.name = "ssam:01:15:02:01:00",
++	.parent = &ssam_node_hub_base,
++};
++
++/* HID touchpad (base hub). */
++static const struct software_node ssam_node_hid_base_touchpad = {
++	.name = "ssam:01:15:02:03:00",
++	.parent = &ssam_node_hub_base,
++};
++
++/* HID device instance 5 (unknown HID device, base hub). */
++static const struct software_node ssam_node_hid_base_iid5 = {
++	.name = "ssam:01:15:02:05:00",
++	.parent = &ssam_node_hub_base,
++};
++
++/* HID device instance 6 (unknown HID device, base hub). */
++static const struct software_node ssam_node_hid_base_iid6 = {
++	.name = "ssam:01:15:02:06:00",
++	.parent = &ssam_node_hub_base,
++};
++
+ /* Devices for Surface Book 2. */
+ static const struct software_node *ssam_node_group_sb2[] = {
+ 	&ssam_node_root,
+@@ -93,6 +135,10 @@ static const struct software_node *ssam_node_group_sb3[] = {
+ 	&ssam_node_bat_sb3base,
+ 	&ssam_node_tmp_pprof,
+ 	&ssam_node_bas_dtx,
++	&ssam_node_hid_base_keyboard,
++	&ssam_node_hid_base_touchpad,
++	&ssam_node_hid_base_iid5,
++	&ssam_node_hid_base_iid6,
+ 	NULL,
+ };
+ 
+@@ -116,6 +162,9 @@ static const struct software_node *ssam_node_group_sl3[] = {
+ 	&ssam_node_bat_ac,
+ 	&ssam_node_bat_main,
+ 	&ssam_node_tmp_pprof,
++	&ssam_node_hid_main_keyboard,
++	&ssam_node_hid_main_touchpad,
++	&ssam_node_hid_main_iid5,
+ 	NULL,
+ };
+ 
+-- 
+2.30.1
+
+From fe31d5f5e6c75f2c6619b3b0b52d5083eaca78da Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Tue, 9 Feb 2021 02:46:40 +0100
+Subject: [PATCH] platform/surface: Add DTX driver
+
+The Microsoft Surface Book series devices consist of a so-called
+clipboard part (containing the CPU, touchscreen, and primary battery)
+and a base part (containing keyboard, secondary battery, and optional
+discrete GPU). These parts can be separated, i.e. the clipboard can be
+detached and used as tablet.
+
+This detachment process is initiated by pressing a button. On the
+Surface Book 2 and 3 (targeted with this commit), the Surface Aggregator
+Module (i.e. the embedded controller on those devices) attempts to send
+a notification to any listening client driver and waits for further
+instructions (i.e. whether the detachment process should continue or be
+aborted). If it does not receive a response in a certain time-frame, the
+detachment process (by default) continues and the clipboard can be
+physically separated. In other words, (by default and) without a driver,
+the detachment process takes about 10 seconds to complete.
+
+This commit introduces a driver for this detachment system (called DTX).
+This driver allows a user-space daemon to control and influence the
+detachment behavior. Specifically, it forwards any detachment requests
+to user-space, allows user-space to make such requests itself, and
+allows handling of those requests. Requests can be handled by either
+aborting, continuing/allowing, or delaying (i.e. resetting the timeout
+via a heartbeat command). The user-space API is implemented via the
+/dev/surface/dtx miscdevice.
+
+In addition, user-space can change the default behavior on timeout from
+allowing detachment to disallowing it, which is useful if the (optional)
+discrete GPU is in use.
+
+Furthermore, this driver allows user-space to receive notifications
+about the state of the base, specifically when it is physically removed
+(as opposed to detachment requested), in what manner it is connected
+(i.e. in reverse-/tent-/studio- or laptop-mode), and what type of base
+is connected. Based on this information, the driver also provides a
+simple tablet-mode switch (aliasing all modes without keyboard access,
+i.e. tablet-mode and studio-mode, to its reported tablet-mode).
+
+An implementation of such a user-space daemon, allowing configuration of
+detachment behavior via scripts (e.g. safely unmounting USB devices
+connected to the base before continuing) can be found at [1].
+
+[1]: https://github.com/linux-surface/surface-dtx-daemon
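+
+For illustration, a minimal user-space client for this interface could
+look roughly like the sketch below (illustrative only, most error
+handling omitted):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/surface_aggregator/dtx.h>
+
+    int main(void)
+    {
+            struct sdtx_event ev;
+            __u8 buf[512];
+            __u16 mode;
+            int fd;
+
+            fd = open("/dev/surface/dtx", O_RDWR);
+            if (fd < 0)
+                    return 1;
+
+            /* Events are only delivered to clients that opted in. */
+            ioctl(fd, SDTX_IOCTL_EVENTS_ENABLE);
+
+            if (ioctl(fd, SDTX_IOCTL_GET_DEVICE_MODE, &mode) == 0)
+                    printf("device mode: %u\n", mode);
+
+            /* For brevity, only the first event of each read is inspected. */
+            while (read(fd, buf, sizeof(buf)) > 0) {
+                    memcpy(&ev, buf, sizeof(ev));
+                    printf("event: code=%u, length=%u\n", ev.code, ev.length);
+            }
+
+            close(fd);
+            return 0;
+    }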
+
+Patchset: surface-sam
+---
+ .../userspace-api/ioctl/ioctl-number.rst      |    2 +
+ MAINTAINERS                                   |    7 +
+ drivers/platform/surface/Kconfig              |   16 +
+ drivers/platform/surface/Makefile             |    1 +
+ drivers/platform/surface/surface_dtx.c        | 1201 +++++++++++++++++
+ include/uapi/linux/surface_aggregator/dtx.h   |  146 ++
+ 6 files changed, 1373 insertions(+)
+ create mode 100644 drivers/platform/surface/surface_dtx.c
+ create mode 100644 include/uapi/linux/surface_aggregator/dtx.h
+
+diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
+index b5231d7f9200..e1dc72a8b62e 100644
+--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
++++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
+@@ -326,6 +326,8 @@ Code  Seq#    Include File                                           Comments
+ 0xA4  00-1F  uapi/asm/sgx.h                                          <mailto:linux-sgx@vger.kernel.org>
+ 0xA5  01     linux/surface_aggregator/cdev.h                         Microsoft Surface Platform System Aggregator
+                                                                      <mailto:luzmaximilian@gmail.com>
++0xA5  20-2F  linux/surface_aggregator/dtx.h                          Microsoft Surface DTX driver
++                                                                     <mailto:luzmaximilian@gmail.com>
+ 0xAA  00-3F  linux/uapi/linux/userfaultfd.h
+ 0xAB  00-1F  linux/nbd.h
+ 0xAC  00-1F  linux/raw.h
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 48e54650e28e..8ea459fc56f4 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11786,6 +11786,13 @@ F:	drivers/scsi/smartpqi/smartpqi*.[ch]
+ F:	include/linux/cciss*.h
+ F:	include/uapi/linux/cciss*.h
+ 
++MICROSOFT SURFACE DTX DRIVER
++M:	Maximilian Luz <luzmaximilian@gmail.com>
++L:	platform-driver-x86@vger.kernel.org
++S:	Maintained
++F:	drivers/platform/surface/surface_dtx.c
++F:	include/uapi/linux/surface_aggregator/dtx.h
++
+ MICROSOFT SURFACE GPE LID SUPPORT DRIVER
+ M:	Maximilian Luz <luzmaximilian@gmail.com>
+ L:	platform-driver-x86@vger.kernel.org
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index 97e08dd35992..745f9d2eb6a7 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -111,6 +111,22 @@ config SURFACE_BOOK1_DGPU_SWITCH
+ 	  This driver provides a sysfs switch to set the power-state of the
+ 	  discrete GPU found on the Microsoft Surface Book 1.
+ 
++config SURFACE_DTX
++	tristate "Surface DTX (Detachment System) Driver"
++	depends on SURFACE_AGGREGATOR
++	depends on INPUT
++	help
++	  Driver for the Surface Book clipboard detachment system (DTX).
++
++	  On the Surface Book series devices, the display part containing the
++	  CPU (called the clipboard) can be detached from the base (containing a
++	  battery, the keyboard, and, optionally, a discrete GPU) by (if
++	  necessary) unlocking and opening the latch connecting both parts.
++
++	  This driver provides a user-space interface that can influence the
++	  behavior of this process, which includes the option to abort it in
++	  case the base is still in use or speed it up in case it is not.
++
+ config SURFACE_GPE
+ 	tristate "Surface GPE/Lid Support Driver"
+ 	depends on DMI
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index 30a212aefd35..19b661e274c3 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -12,5 +12,6 @@ obj-$(CONFIG_SURFACE_AGGREGATOR)	+= aggregator/
+ obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV)	+= surface_aggregator_cdev.o
+ obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+ obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
++obj-$(CONFIG_SURFACE_DTX)		+= surface_dtx.o
+ obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
+diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
+new file mode 100644
+index 000000000000..a95adc1094aa
+--- /dev/null
++++ b/drivers/platform/surface/surface_dtx.c
+@@ -0,0 +1,1201 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface Book (gen. 2 and later) detachment system (DTX) driver.
++ *
++ * Provides a user-space interface to properly handle clipboard/tablet
++ * (containing screen and processor) detachment from the base of the device
++ * (containing the keyboard and optionally a discrete GPU). Allows to
++ * acknowledge (to speed things up), abort (e.g. in case the dGPU is still in
++ * use), or request detachment via user-space.
++ *
++ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/fs.h>
++#include <linux/input.h>
++#include <linux/ioctl.h>
++#include <linux/kernel.h>
++#include <linux/kfifo.h>
++#include <linux/kref.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/rwsem.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/dtx.h>
++
++
++/* -- SSAM interface. ------------------------------------------------------- */
++
++enum sam_event_cid_bas {
++	SAM_EVENT_CID_DTX_CONNECTION			= 0x0c,
++	SAM_EVENT_CID_DTX_REQUEST			= 0x0e,
++	SAM_EVENT_CID_DTX_CANCEL			= 0x0f,
++	SAM_EVENT_CID_DTX_LATCH_STATUS			= 0x11,
++};
++
++enum ssam_bas_base_state {
++	SSAM_BAS_BASE_STATE_DETACH_SUCCESS		= 0x00,
++	SSAM_BAS_BASE_STATE_ATTACHED			= 0x01,
++	SSAM_BAS_BASE_STATE_NOT_FEASIBLE		= 0x02,
++};
++
++enum ssam_bas_latch_status {
++	SSAM_BAS_LATCH_STATUS_CLOSED			= 0x00,
++	SSAM_BAS_LATCH_STATUS_OPENED			= 0x01,
++	SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN		= 0x02,
++	SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN	= 0x03,
++	SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE		= 0x04,
++};
++
++enum ssam_bas_cancel_reason {
++	SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE		= 0x00,  /* Low battery. */
++	SSAM_BAS_CANCEL_REASON_TIMEOUT			= 0x02,
++	SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN		= 0x03,
++	SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN	= 0x04,
++	SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE		= 0x05,
++};
++
++struct ssam_bas_base_info {
++	u8 state;
++	u8 base_id;
++} __packed;
++
++static_assert(sizeof(struct ssam_bas_base_info) == 2);
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x06,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x07,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x08,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x09,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0a,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0b,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0c,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0d,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x11,
++	.instance_id     = 0x00,
++});
++
++
++/* -- Main structures. ------------------------------------------------------ */
++
++enum sdtx_device_state {
++	SDTX_DEVICE_SHUTDOWN_BIT    = BIT(0),
++	SDTX_DEVICE_DIRTY_BASE_BIT  = BIT(1),
++	SDTX_DEVICE_DIRTY_MODE_BIT  = BIT(2),
++	SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
++};
++
++struct sdtx_device {
++	struct kref kref;
++	struct rw_semaphore lock;         /* Guards device and controller reference. */
++
++	struct device *dev;
++	struct ssam_controller *ctrl;
++	unsigned long flags;
++
++	struct miscdevice mdev;
++	wait_queue_head_t waitq;
++	struct mutex write_lock;          /* Guards order of events/notifications. */
++	struct rw_semaphore client_lock;  /* Guards client list.                   */
++	struct list_head client_list;
++
++	struct delayed_work state_work;
++	struct {
++		struct ssam_bas_base_info base;
++		u8 device_mode;
++		u8 latch_status;
++	} state;
++
++	struct delayed_work mode_work;
++	struct input_dev *mode_switch;
++
++	struct ssam_event_notifier notif;
++};
++
++enum sdtx_client_state {
++	SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
++};
++
++struct sdtx_client {
++	struct sdtx_device *ddev;
++	struct list_head node;
++	unsigned long flags;
++
++	struct fasync_struct *fasync;
++
++	struct mutex read_lock;           /* Guards FIFO buffer read access. */
++	DECLARE_KFIFO(buffer, u8, 512);
++};
++
++static void __sdtx_device_release(struct kref *kref)
++{
++	struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);
++
++	mutex_destroy(&ddev->write_lock);
++	kfree(ddev);
++}
++
++static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
++{
++	if (ddev)
++		kref_get(&ddev->kref);
++
++	return ddev;
++}
++
++static void sdtx_device_put(struct sdtx_device *ddev)
++{
++	if (ddev)
++		kref_put(&ddev->kref, __sdtx_device_release);
++}
++
++
++/* -- Firmware value translations. ------------------------------------------ */
++
++static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
++{
++	switch (state) {
++	case SSAM_BAS_BASE_STATE_ATTACHED:
++		return SDTX_BASE_ATTACHED;
++
++	case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
++		return SDTX_BASE_DETACHED;
++
++	case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
++		return SDTX_DETACH_NOT_FEASIBLE;
++
++	default:
++		dev_err(ddev->dev, "unknown base state: %#04x\n", state);
++		return SDTX_UNKNOWN(state);
++	}
++}
++
++static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
++{
++	switch (status) {
++	case SSAM_BAS_LATCH_STATUS_CLOSED:
++		return SDTX_LATCH_CLOSED;
++
++	case SSAM_BAS_LATCH_STATUS_OPENED:
++		return SDTX_LATCH_OPENED;
++
++	case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
++		return SDTX_ERR_FAILED_TO_OPEN;
++
++	case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
++		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
++
++	case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
++		return SDTX_ERR_FAILED_TO_CLOSE;
++
++	default:
++		dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
++		return SDTX_UNKNOWN(status);
++	}
++}
++
++static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
++{
++	switch (reason) {
++	case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
++		return SDTX_DETACH_NOT_FEASIBLE;
++
++	case SSAM_BAS_CANCEL_REASON_TIMEOUT:
++		return SDTX_DETACH_TIMEDOUT;
++
++	case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
++		return SDTX_ERR_FAILED_TO_OPEN;
++
++	case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
++		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
++
++	case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
++		return SDTX_ERR_FAILED_TO_CLOSE;
++
++	default:
++		dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
++		return SDTX_UNKNOWN(reason);
++	}
++}
++
++
++/* -- IOCTLs. --------------------------------------------------------------- */
++
++static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
++				    struct sdtx_base_info __user *buf)
++{
++	struct ssam_bas_base_info raw;
++	struct sdtx_base_info info;
++	int status;
++
++	lockdep_assert_held_read(&ddev->lock);
++
++	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
++	if (status < 0)
++		return status;
++
++	info.state = sdtx_translate_base_state(ddev, raw.state);
++	info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);
++
++	if (copy_to_user(buf, &info, sizeof(info)))
++		return -EFAULT;
++
++	return 0;
++}
++
++static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
++{
++	u8 mode;
++	int status;
++
++	lockdep_assert_held_read(&ddev->lock);
++
++	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
++	if (status < 0)
++		return status;
++
++	return put_user(mode, buf);
++}
++
++static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
++{
++	u8 latch;
++	int status;
++
++	lockdep_assert_held_read(&ddev->lock);
++
++	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
++	if (status < 0)
++		return status;
++
++	return put_user(sdtx_translate_latch_status(ddev, latch), buf);
++}
++
++static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
++{
++	struct sdtx_device *ddev = client->ddev;
++
++	lockdep_assert_held_read(&ddev->lock);
++
++	switch (cmd) {
++	case SDTX_IOCTL_EVENTS_ENABLE:
++		set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
++		return 0;
++
++	case SDTX_IOCTL_EVENTS_DISABLE:
++		clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
++		return 0;
++
++	case SDTX_IOCTL_LATCH_LOCK:
++		return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_UNLOCK:
++		return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_REQUEST:
++		return ssam_retry(ssam_bas_latch_request, ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_CONFIRM:
++		return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_HEARTBEAT:
++		return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_CANCEL:
++		return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);
++
++	case SDTX_IOCTL_GET_BASE_INFO:
++		return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);
++
++	case SDTX_IOCTL_GET_DEVICE_MODE:
++		return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);
++
++	case SDTX_IOCTL_GET_LATCH_STATUS:
++		return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
++
++	default:
++		return -EINVAL;
++	}
++}
++
++static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	struct sdtx_client *client = file->private_data;
++	long status;
++
++	if (down_read_killable(&client->ddev->lock))
++		return -ERESTARTSYS;
++
++	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
++		up_read(&client->ddev->lock);
++		return -ENODEV;
++	}
++
++	status = __surface_dtx_ioctl(client, cmd, arg);
++
++	up_read(&client->ddev->lock);
++	return status;
++}
++
++
++/* -- File operations. ------------------------------------------------------ */
++
++static int surface_dtx_open(struct inode *inode, struct file *file)
++{
++	struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
++	struct sdtx_client *client;
++
++	/* Initialize client. */
++	client = kzalloc(sizeof(*client), GFP_KERNEL);
++	if (!client)
++		return -ENOMEM;
++
++	client->ddev = sdtx_device_get(ddev);
++
++	INIT_LIST_HEAD(&client->node);
++
++	mutex_init(&client->read_lock);
++	INIT_KFIFO(client->buffer);
++
++	file->private_data = client;
++
++	/* Attach client. */
++	down_write(&ddev->client_lock);
++
++	/*
++	 * Do not add a new client if the device has been shut down. Note that
++	 * it's enough to hold the client_lock here as, during shutdown, we
++	 * only acquire that lock and remove clients after marking the device
++	 * as shut down.
++	 */
++	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
++		up_write(&ddev->client_lock);
++		sdtx_device_put(client->ddev);
++		kfree(client);
++		return -ENODEV;
++	}
++
++	list_add_tail(&client->node, &ddev->client_list);
++	up_write(&ddev->client_lock);
++
++	stream_open(inode, file);
++	return 0;
++}
++
++static int surface_dtx_release(struct inode *inode, struct file *file)
++{
++	struct sdtx_client *client = file->private_data;
++
++	/* Detach client. */
++	down_write(&client->ddev->client_lock);
++	list_del(&client->node);
++	up_write(&client->ddev->client_lock);
++
++	/* Free client. */
++	sdtx_device_put(client->ddev);
++	mutex_destroy(&client->read_lock);
++	kfree(client);
++
++	return 0;
++}
++
++static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
++{
++	struct sdtx_client *client = file->private_data;
++	struct sdtx_device *ddev = client->ddev;
++	unsigned int copied;
++	int status = 0;
++
++	if (down_read_killable(&ddev->lock))
++		return -ERESTARTSYS;
++
++	/* Make sure we're not shut down. */
++	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
++		up_read(&ddev->lock);
++		return -ENODEV;
++	}
++
++	do {
++		/* Check availability, wait if necessary. */
++		if (kfifo_is_empty(&client->buffer)) {
++			up_read(&ddev->lock);
++
++			if (file->f_flags & O_NONBLOCK)
++				return -EAGAIN;
++
++			status = wait_event_interruptible(ddev->waitq,
++							  !kfifo_is_empty(&client->buffer) ||
++							  test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
++								   &ddev->flags));
++			if (status < 0)
++				return status;
++
++			if (down_read_killable(&client->ddev->lock))
++				return -ERESTARTSYS;
++
++			/* Need to check that we're not shut down again. */
++			if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
++				up_read(&ddev->lock);
++				return -ENODEV;
++			}
++		}
++
++		/* Try to read from FIFO. */
++		if (mutex_lock_interruptible(&client->read_lock)) {
++			up_read(&ddev->lock);
++			return -ERESTARTSYS;
++		}
++
++		status = kfifo_to_user(&client->buffer, buf, count, &copied);
++		mutex_unlock(&client->read_lock);
++
++		if (status < 0) {
++			up_read(&ddev->lock);
++			return status;
++		}
++
++		/* We might not have gotten anything, check this here. */
++		if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
++			up_read(&ddev->lock);
++			return -EAGAIN;
++		}
++	} while (copied == 0);
++
++	up_read(&ddev->lock);
++	return copied;
++}
++
++static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
++{
++	struct sdtx_client *client = file->private_data;
++	__poll_t events = 0;
++
++	if (down_read_killable(&client->ddev->lock))
++		return -ERESTARTSYS;
++
++	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
++		up_read(&client->ddev->lock);
++		return EPOLLHUP | EPOLLERR;
++	}
++
++	poll_wait(file, &client->ddev->waitq, pt);
++
++	if (!kfifo_is_empty(&client->buffer))
++		events |= EPOLLIN | EPOLLRDNORM;
++
++	up_read(&client->ddev->lock);
++	return events;
++}
++
++static int surface_dtx_fasync(int fd, struct file *file, int on)
++{
++	struct sdtx_client *client = file->private_data;
++
++	return fasync_helper(fd, file, on, &client->fasync);
++}
++
++static const struct file_operations surface_dtx_fops = {
++	.owner          = THIS_MODULE,
++	.open           = surface_dtx_open,
++	.release        = surface_dtx_release,
++	.read           = surface_dtx_read,
++	.poll           = surface_dtx_poll,
++	.fasync         = surface_dtx_fasync,
++	.unlocked_ioctl = surface_dtx_ioctl,
++	.compat_ioctl   = surface_dtx_ioctl,
++	.llseek         = no_llseek,
++};
++
++
++/* -- Event handling/forwarding. -------------------------------------------- */
++
++/*
++ * The device operation mode is not immediately updated on the EC when the
++ * base has been connected, i.e. querying the device mode inside the
++ * connection event callback yields an outdated value. Thus, we can only
++ * determine the new tablet-mode switch and device mode values after some
++ * time.
++ *
++ * These delays have been chosen by experimenting. We first delay on connect
++ * events, then check and validate the device mode against the base state and
++ * if invalid delay again by the "recheck" delay.
++ */
++#define SDTX_DEVICE_MODE_DELAY_CONNECT	msecs_to_jiffies(100)
++#define SDTX_DEVICE_MODE_DELAY_RECHECK	msecs_to_jiffies(100)
++
++struct sdtx_status_event {
++	struct sdtx_event e;
++	__u16 v;
++} __packed;
++
++struct sdtx_base_info_event {
++	struct sdtx_event e;
++	struct sdtx_base_info v;
++} __packed;
++
++union sdtx_generic_event {
++	struct sdtx_event common;
++	struct sdtx_status_event status;
++	struct sdtx_base_info_event base;
++};
++
++static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
++
++/* Must be executed with ddev->write_lock held. */
++static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
++{
++	const size_t len = sizeof(struct sdtx_event) + evt->length;
++	struct sdtx_client *client;
++
++	lockdep_assert_held(&ddev->write_lock);
++
++	down_read(&ddev->client_lock);
++	list_for_each_entry(client, &ddev->client_list, node) {
++		if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
++			continue;
++
++		if (likely(kfifo_avail(&client->buffer) >= len))
++			kfifo_in(&client->buffer, (const u8 *)evt, len);
++		else
++			dev_warn(ddev->dev, "event buffer overrun\n");
++
++		kill_fasync(&client->fasync, SIGIO, POLL_IN);
++	}
++	up_read(&ddev->client_lock);
++
++	wake_up_interruptible(&ddev->waitq);
++}
++
++static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
++{
++	struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
++	union sdtx_generic_event event;
++	size_t len;
++
++	/* Validate event payload length. */
++	switch (in->command_id) {
++	case SAM_EVENT_CID_DTX_CONNECTION:
++		len = 2 * sizeof(u8);
++		break;
++
++	case SAM_EVENT_CID_DTX_REQUEST:
++		len = 0;
++		break;
++
++	case SAM_EVENT_CID_DTX_CANCEL:
++		len = sizeof(u8);
++		break;
++
++	case SAM_EVENT_CID_DTX_LATCH_STATUS:
++		len = sizeof(u8);
++		break;
++
++	default:
++		return 0;
++	}
++
++	if (in->length != len) {
++		dev_err(ddev->dev,
++			"unexpected payload size for event %#04x: got %u, expected %zu\n",
++			in->command_id, in->length, len);
++		return 0;
++	}
++
++	mutex_lock(&ddev->write_lock);
++
++	/* Translate event. */
++	switch (in->command_id) {
++	case SAM_EVENT_CID_DTX_CONNECTION:
++		clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
++
++		/* If state has not changed: do not send new event. */
++		if (ddev->state.base.state == in->data[0] &&
++		    ddev->state.base.base_id == in->data[1])
++			goto out;
++
++		ddev->state.base.state = in->data[0];
++		ddev->state.base.base_id = in->data[1];
++
++		event.base.e.length = sizeof(struct sdtx_base_info);
++		event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
++		event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
++		event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
++		break;
++
++	case SAM_EVENT_CID_DTX_REQUEST:
++		event.common.code = SDTX_EVENT_REQUEST;
++		event.common.length = 0;
++		break;
++
++	case SAM_EVENT_CID_DTX_CANCEL:
++		event.status.e.length = sizeof(u16);
++		event.status.e.code = SDTX_EVENT_CANCEL;
++		event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
++		break;
++
++	case SAM_EVENT_CID_DTX_LATCH_STATUS:
++		clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
++
++		/* If state has not changed: do not send new event. */
++		if (ddev->state.latch_status == in->data[0])
++			goto out;
++
++		ddev->state.latch_status = in->data[0];
++
++		event.status.e.length = sizeof(u16);
++		event.status.e.code = SDTX_EVENT_LATCH_STATUS;
++		event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
++		break;
++	}
++
++	sdtx_push_event(ddev, &event.common);
++
++	/* Update device mode on base connection change. */
++	if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
++		unsigned long delay;
++
++		delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
++		sdtx_update_device_mode(ddev, delay);
++	}
++
++out:
++	mutex_unlock(&ddev->write_lock);
++	return SSAM_NOTIF_HANDLED;
++}
++
++
++/* -- State update functions. ----------------------------------------------- */
++
++static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
++{
++	return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
++		(mode == SDTX_DEVICE_MODE_TABLET)) ||
++	       ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
++		(mode != SDTX_DEVICE_MODE_TABLET));
++}
++
++static void sdtx_device_mode_workfn(struct work_struct *work)
++{
++	struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
++	struct sdtx_status_event event;
++	struct ssam_bas_base_info base;
++	int status, tablet;
++	u8 mode;
++
++	/* Get operation mode. */
++	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
++	if (status) {
++		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
++		return;
++	}
++
++	/* Get base info. */
++	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
++	if (status) {
++		dev_err(ddev->dev, "failed to get base info: %d\n", status);
++		return;
++	}
++
++	/*
++	 * In some cases (specifically when attaching the base), the device
++	 * mode isn't updated right away. Thus we check if the device mode
++	 * makes sense for the given base state and try again later if it
++	 * doesn't.
++	 */
++	if (sdtx_device_mode_invalid(mode, base.state)) {
++		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
++		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
++		return;
++	}
++
++	mutex_lock(&ddev->write_lock);
++	clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
++
++	/* Avoid sending duplicate device-mode events. */
++	if (ddev->state.device_mode == mode) {
++		mutex_unlock(&ddev->write_lock);
++		return;
++	}
++
++	ddev->state.device_mode = mode;
++
++	event.e.length = sizeof(u16);
++	event.e.code = SDTX_EVENT_DEVICE_MODE;
++	event.v = mode;
++
++	sdtx_push_event(ddev, &event.e);
++
++	/* Send SW_TABLET_MODE event. */
++	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
++	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
++	input_sync(ddev->mode_switch);
++
++	mutex_unlock(&ddev->write_lock);
++}
++
++static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
++{
++	schedule_delayed_work(&ddev->mode_work, delay);
++}
++
++/* Must be executed with ddev->write_lock held. */
++static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
++					    struct ssam_bas_base_info info)
++{
++	struct sdtx_base_info_event event;
++
++	lockdep_assert_held(&ddev->write_lock);
++
++	/* Prevent duplicate events. */
++	if (ddev->state.base.state == info.state &&
++	    ddev->state.base.base_id == info.base_id)
++		return;
++
++	ddev->state.base = info;
++
++	event.e.length = sizeof(struct sdtx_base_info);
++	event.e.code = SDTX_EVENT_BASE_CONNECTION;
++	event.v.state = sdtx_translate_base_state(ddev, info.state);
++	event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);
++
++	sdtx_push_event(ddev, &event.e);
++}
++
++/* Must be executed with ddev->write_lock held. */
++static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
++{
++	struct sdtx_status_event event;
++	int tablet;
++
++	/*
++	 * Note: This function must be called after updating the base state
++	 * via __sdtx_device_state_update_base(), as we rely on the updated
++	 * base state value in the validity check below.
++	 */
++
++	lockdep_assert_held(&ddev->write_lock);
++
++	if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
++		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
++		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
++		return;
++	}
++
++	/* Prevent duplicate events. */
++	if (ddev->state.device_mode == mode)
++		return;
++
++	ddev->state.device_mode = mode;
++
++	/* Send event. */
++	event.e.length = sizeof(u16);
++	event.e.code = SDTX_EVENT_DEVICE_MODE;
++	event.v = mode;
++
++	sdtx_push_event(ddev, &event.e);
++
++	/* Send SW_TABLET_MODE event. */
++	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
++	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
++	input_sync(ddev->mode_switch);
++}
++
++/* Must be executed with ddev->write_lock held. */
++static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
++{
++	struct sdtx_status_event event;
++
++	lockdep_assert_held(&ddev->write_lock);
++
++	/* Prevent duplicate events. */
++	if (ddev->state.latch_status == status)
++		return;
++
++	ddev->state.latch_status = status;
++
++	event.e.length = sizeof(u16);
++	event.e.code = SDTX_EVENT_LATCH_STATUS;
++	event.v = sdtx_translate_latch_status(ddev, status);
++
++	sdtx_push_event(ddev, &event.e);
++}
++
++static void sdtx_device_state_workfn(struct work_struct *work)
++{
++	struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
++	struct ssam_bas_base_info base;
++	u8 mode, latch;
++	int status;
++
++	/* Mark everything as dirty. */
++	set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
++	set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
++	set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
++
++	/*
++	 * Ensure that the state gets marked as dirty before continuing to
++	 * query it. Necessary to ensure that clear_bit() calls in
++	 * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
++	 * bits if an event is received while updating the state here.
++	 */
++	smp_mb__after_atomic();
++
++	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
++	if (status) {
++		dev_err(ddev->dev, "failed to get base state: %d\n", status);
++		return;
++	}
++
++	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
++	if (status) {
++		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
++		return;
++	}
++
++	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
++	if (status) {
++		dev_err(ddev->dev, "failed to get latch status: %d\n", status);
++		return;
++	}
++
++	mutex_lock(&ddev->write_lock);
++
++	/*
++	 * If the respective dirty-bit has been cleared, an event has been
++	 * received, updating this state. The queried state may thus be out of
++	 * date. At this point, we can safely assume that the state provided
++	 * by the event is either up to date, or we're about to receive
++	 * another event updating it.
++	 */
++
++	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
++		__sdtx_device_state_update_base(ddev, base);
++
++	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
++		__sdtx_device_state_update_mode(ddev, mode);
++
++	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
++		__sdtx_device_state_update_latch(ddev, latch);
++
++	mutex_unlock(&ddev->write_lock);
++}
++
++static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
++{
++	schedule_delayed_work(&ddev->state_work, delay);
++}
++
++
++/* -- Common device initialization. ----------------------------------------- */
++
++static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
++			    struct ssam_controller *ctrl)
++{
++	int status, tablet_mode;
++
++	/* Basic initialization. */
++	kref_init(&ddev->kref);
++	init_rwsem(&ddev->lock);
++	ddev->dev = dev;
++	ddev->ctrl = ctrl;
++
++	ddev->mdev.minor = MISC_DYNAMIC_MINOR;
++	ddev->mdev.name = "surface_dtx";
++	ddev->mdev.nodename = "surface/dtx";
++	ddev->mdev.fops = &surface_dtx_fops;
++
++	ddev->notif.base.priority = 1;
++	ddev->notif.base.fn = sdtx_notifier;
++	ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
++	ddev->notif.event.id.instance = 0;
++	ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	init_waitqueue_head(&ddev->waitq);
++	mutex_init(&ddev->write_lock);
++	init_rwsem(&ddev->client_lock);
++	INIT_LIST_HEAD(&ddev->client_list);
++
++	INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
++	INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);
++
++	/*
++	 * Get current device state. We want to guarantee that events are only
++	 * sent when state actually changes. Thus we cannot use special
++	 * "uninitialized" values, as that would cause problems when manually
++	 * querying the state in surface_dtx_pm_complete(). I.e. we would not
++	 * be able to detect state changes there if no change event has been
++	 * received between driver initialization and first device suspension.
++	 *
++	 * Note that we also need to do this before registering the event
++	 * notifier, as that may access the state values.
++	 */
++	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
++	if (status)
++		return status;
++
++	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
++	if (status)
++		return status;
++
++	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
++	if (status)
++		return status;
++
++	/* Set up tablet mode switch. */
++	ddev->mode_switch = input_allocate_device();
++	if (!ddev->mode_switch)
++		return -ENOMEM;
++
++	ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
++	ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
++	ddev->mode_switch->id.bustype = BUS_HOST;
++	ddev->mode_switch->dev.parent = ddev->dev;
++
++	tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
++	input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
++	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);
++
++	status = input_register_device(ddev->mode_switch);
++	if (status) {
++		input_free_device(ddev->mode_switch);
++		return status;
++	}
++
++	/* Set up event notifier. */
++	status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
++	if (status)
++		goto err_notif;
++
++	/* Register miscdevice. */
++	status = misc_register(&ddev->mdev);
++	if (status)
++		goto err_mdev;
++
++	/*
++	 * Update device state in case it has changed between getting the
++	 * initial mode and registering the event notifier.
++	 */
++	sdtx_update_device_state(ddev, 0);
++	return 0;
++
++err_mdev:
++	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
++	cancel_delayed_work_sync(&ddev->mode_work);
++err_notif:
++	input_unregister_device(ddev->mode_switch);
++	return status;
++}
++
++static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
++{
++	struct sdtx_device *ddev;
++	int status;
++
++	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
++	if (!ddev)
++		return ERR_PTR(-ENOMEM);
++
++	status = sdtx_device_init(ddev, dev, ctrl);
++	if (status) {
++		sdtx_device_put(ddev);
++		return ERR_PTR(status);
++	}
++
++	return ddev;
++}
++
++static void sdtx_device_destroy(struct sdtx_device *ddev)
++{
++	struct sdtx_client *client;
++
++	/*
++	 * Mark device as shut-down. Prevent new clients from being added and
++	 * new operations from being executed.
++	 */
++	set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);
++
++	/* Disable notifiers, prevent new events from arriving. */
++	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
++
++	/* Stop mode_work, prevent access to mode_switch. */
++	cancel_delayed_work_sync(&ddev->mode_work);
++
++	/* Stop state_work. */
++	cancel_delayed_work_sync(&ddev->state_work);
++
++	/* With mode_work canceled, we can unregister the mode_switch. */
++	input_unregister_device(ddev->mode_switch);
++
++	/* Wake up async clients. */
++	down_write(&ddev->client_lock);
++	list_for_each_entry(client, &ddev->client_list, node) {
++		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
++	}
++	up_write(&ddev->client_lock);
++
++	/* Wake up blocking clients. */
++	wake_up_interruptible(&ddev->waitq);
++
++	/*
++	 * Wait for clients to finish their current operation. After this, the
++	 * controller and device references are guaranteed to be no longer in
++	 * use.
++	 */
++	down_write(&ddev->lock);
++	ddev->dev = NULL;
++	ddev->ctrl = NULL;
++	up_write(&ddev->lock);
++
++	/* Finally remove the misc-device. */
++	misc_deregister(&ddev->mdev);
++
++	/*
++	 * We're now guaranteed that surface_dtx_open() won't be called any
++	 * more, so we can now drop our reference.
++	 */
++	sdtx_device_put(ddev);
++}
++
++
++/* -- PM ops. --------------------------------------------------------------- */
++
++#ifdef CONFIG_PM_SLEEP
++
++static void surface_dtx_pm_complete(struct device *dev)
++{
++	struct sdtx_device *ddev = dev_get_drvdata(dev);
++
++	/*
++	 * Normally, the EC will store events while suspended (i.e. in
++	 * display-off state) and release them when resumed (i.e. transitioned
++	 * to display-on state). During hibernation, however, the EC will be
++	 * shut down and does not store events. Furthermore, events might be
++	 * dropped during prolonged suspension (it is currently unknown how
++	 * big this event buffer is and how it behaves on overruns).
++	 *
++	 * To prevent any problems, we update the device state here. We do
++	 * this delayed to ensure that any events sent by the EC directly
++	 * after resuming will be handled first. The delay below has been
++	 * chosen (experimentally), so that there should be ample time for
++	 * these events to be handled, before we check and, if necessary,
++	 * update the state.
++	 */
++	sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
++}
++
++static const struct dev_pm_ops surface_dtx_pm_ops = {
++	.complete = surface_dtx_pm_complete,
++};
++
++#else /* CONFIG_PM_SLEEP */
++
++static const struct dev_pm_ops surface_dtx_pm_ops = {};
++
++#endif /* CONFIG_PM_SLEEP */
++
++
++/* -- Platform driver. ------------------------------------------------------ */
++
++static int surface_dtx_platform_probe(struct platform_device *pdev)
++{
++	struct ssam_controller *ctrl;
++	struct sdtx_device *ddev;
++
++	/* Link to EC. */
++	ctrl = ssam_client_bind(&pdev->dev);
++	if (IS_ERR(ctrl))
++		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
++
++	ddev = sdtx_device_create(&pdev->dev, ctrl);
++	if (IS_ERR(ddev))
++		return PTR_ERR(ddev);
++
++	platform_set_drvdata(pdev, ddev);
++	return 0;
++}
++
++static int surface_dtx_platform_remove(struct platform_device *pdev)
++{
++	sdtx_device_destroy(platform_get_drvdata(pdev));
++	return 0;
++}
++
++static const struct acpi_device_id surface_dtx_acpi_match[] = {
++	{ "MSHW0133", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
++
++static struct platform_driver surface_dtx_platform_driver = {
++	.probe = surface_dtx_platform_probe,
++	.remove = surface_dtx_platform_remove,
++	.driver = {
++		.name = "surface_dtx_pltf",
++		.acpi_match_table = surface_dtx_acpi_match,
++		.pm = &surface_dtx_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(surface_dtx_platform_driver);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/include/uapi/linux/surface_aggregator/dtx.h b/include/uapi/linux/surface_aggregator/dtx.h
+new file mode 100644
+index 000000000000..0833aab0d819
+--- /dev/null
++++ b/include/uapi/linux/surface_aggregator/dtx.h
+@@ -0,0 +1,146 @@
++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
++/*
++ * Surface DTX (clipboard detachment system driver) user-space interface.
++ *
++ * Definitions, structs, and IOCTLs for the /dev/surface/dtx misc device. This
++ * device allows user-space to control the clipboard detachment process on
++ * Surface Book series devices.
++ *
++ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
++#define _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++/* Status/error categories */
++#define SDTX_CATEGORY_STATUS		0x0000
++#define SDTX_CATEGORY_RUNTIME_ERROR	0x1000
++#define SDTX_CATEGORY_HARDWARE_ERROR	0x2000
++#define SDTX_CATEGORY_UNKNOWN		0xf000
++
++#define SDTX_CATEGORY_MASK		0xf000
++#define SDTX_CATEGORY(value)		((value) & SDTX_CATEGORY_MASK)
++
++#define SDTX_STATUS(code)		((code) | SDTX_CATEGORY_STATUS)
++#define SDTX_ERR_RT(code)		((code) | SDTX_CATEGORY_RUNTIME_ERROR)
++#define SDTX_ERR_HW(code)		((code) | SDTX_CATEGORY_HARDWARE_ERROR)
++#define SDTX_UNKNOWN(code)		((code) | SDTX_CATEGORY_UNKNOWN)
++
++#define SDTX_SUCCESS(value)		(SDTX_CATEGORY(value) == SDTX_CATEGORY_STATUS)
++
++/* Latch status values */
++#define SDTX_LATCH_CLOSED		SDTX_STATUS(0x00)
++#define SDTX_LATCH_OPENED		SDTX_STATUS(0x01)
++
++/* Base state values */
++#define SDTX_BASE_DETACHED		SDTX_STATUS(0x00)
++#define SDTX_BASE_ATTACHED		SDTX_STATUS(0x01)
++
++/* Runtime errors (non-critical) */
++#define SDTX_DETACH_NOT_FEASIBLE	SDTX_ERR_RT(0x01)
++#define SDTX_DETACH_TIMEDOUT		SDTX_ERR_RT(0x02)
++
++/* Hardware errors (critical) */
++#define SDTX_ERR_FAILED_TO_OPEN		SDTX_ERR_HW(0x01)
++#define SDTX_ERR_FAILED_TO_REMAIN_OPEN	SDTX_ERR_HW(0x02)
++#define SDTX_ERR_FAILED_TO_CLOSE	SDTX_ERR_HW(0x03)
++
++/* Base types */
++#define SDTX_DEVICE_TYPE_HID		0x0100
++#define SDTX_DEVICE_TYPE_SSH		0x0200
++
++#define SDTX_DEVICE_TYPE_MASK		0x0f00
++#define SDTX_DEVICE_TYPE(value)		((value) & SDTX_DEVICE_TYPE_MASK)
++
++#define SDTX_BASE_TYPE_HID(id)		((id) | SDTX_DEVICE_TYPE_HID)
++#define SDTX_BASE_TYPE_SSH(id)		((id) | SDTX_DEVICE_TYPE_SSH)
++
++/**
++ * enum sdtx_device_mode - Mode describing how (and if) the clipboard is
++ * attached to the base of the device.
++ * @SDTX_DEVICE_MODE_TABLET: The clipboard is detached from the base and the
++ *                           device operates as tablet.
++ * @SDTX_DEVICE_MODE_LAPTOP: The clipboard is attached normally to the base
++ *                           and the device operates as laptop.
++ * @SDTX_DEVICE_MODE_STUDIO: The clipboard is attached to the base in reverse.
++ *                           The device operates as tablet with keyboard and
++ *                           touchpad deactivated, however, the base battery
++ *                           and, if present in the specific device model, dGPU
++ *                           are available to the system.
++ */
++enum sdtx_device_mode {
++	SDTX_DEVICE_MODE_TABLET		= 0x00,
++	SDTX_DEVICE_MODE_LAPTOP		= 0x01,
++	SDTX_DEVICE_MODE_STUDIO		= 0x02,
++};
++
++/**
++ * struct sdtx_event - Event provided by reading from the DTX device file.
++ * @length: Length of the event payload, in bytes.
++ * @code:   Event code, detailing what type of event this is.
++ * @data:   Payload of the event, containing @length bytes.
++ *
++ * See &enum sdtx_event_code for currently valid event codes.
++ */
++struct sdtx_event {
++	__u16 length;
++	__u16 code;
++	__u8 data[];
++} __attribute__((__packed__));
++
++/**
++ * enum sdtx_event_code - Code describing the type of an event.
++ * @SDTX_EVENT_REQUEST:         Detachment request event type.
++ * @SDTX_EVENT_CANCEL:          Cancel detachment process event type.
++ * @SDTX_EVENT_BASE_CONNECTION: Base/clipboard connection change event type.
++ * @SDTX_EVENT_LATCH_STATUS:    Latch status change event type.
++ * @SDTX_EVENT_DEVICE_MODE:     Device mode change event type.
++ *
++ * Used in &struct sdtx_event to describe the type of the event. Further event
++ * codes are reserved for future use. Any event parser should be able to
++ * gracefully handle unknown events, i.e. by simply skipping them.
++ *
++ * Consult the DTX user-space interface documentation for details regarding
++ * the individual event types.
++ */
++enum sdtx_event_code {
++	SDTX_EVENT_REQUEST		= 1,
++	SDTX_EVENT_CANCEL		= 2,
++	SDTX_EVENT_BASE_CONNECTION	= 3,
++	SDTX_EVENT_LATCH_STATUS		= 4,
++	SDTX_EVENT_DEVICE_MODE		= 5,
++};
++
++/**
++ * struct sdtx_base_info - Describes if and what type of base is connected.
++ * @state:   The state of the connection. Valid values are %SDTX_BASE_DETACHED,
++ *           %SDTX_BASE_ATTACHED, and %SDTX_DETACH_NOT_FEASIBLE (in case a base
++ *           is attached but low clipboard battery prevents detachment). Other
++ *           values are currently reserved.
++ * @base_id: The type of base connected. Zero if no base is connected.
++ */
++struct sdtx_base_info {
++	__u16 state;
++	__u16 base_id;
++} __attribute__((__packed__));
++
++/* IOCTLs */
++#define SDTX_IOCTL_EVENTS_ENABLE	_IO(0xa5, 0x21)
++#define SDTX_IOCTL_EVENTS_DISABLE	_IO(0xa5, 0x22)
++
++#define SDTX_IOCTL_LATCH_LOCK		_IO(0xa5, 0x23)
++#define SDTX_IOCTL_LATCH_UNLOCK		_IO(0xa5, 0x24)
++
++#define SDTX_IOCTL_LATCH_REQUEST	_IO(0xa5, 0x25)
++#define SDTX_IOCTL_LATCH_CONFIRM	_IO(0xa5, 0x26)
++#define SDTX_IOCTL_LATCH_HEARTBEAT	_IO(0xa5, 0x27)
++#define SDTX_IOCTL_LATCH_CANCEL		_IO(0xa5, 0x28)
++
++#define SDTX_IOCTL_GET_BASE_INFO	_IOR(0xa5, 0x29, struct sdtx_base_info)
++#define SDTX_IOCTL_GET_DEVICE_MODE	_IOR(0xa5, 0x2a, __u16)
++#define SDTX_IOCTL_GET_LATCH_STATUS	_IOR(0xa5, 0x2b, __u16)
++
++#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H */
+-- 
+2.30.1
+
+From 5666bacf4fa214ee9b61767660ee796568ec881a Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Tue, 9 Feb 2021 02:50:11 +0100
+Subject: [PATCH] platform/surface: dtx: Add support for native SSAM devices
+
+Add support for native SSAM devices to the DTX driver. This allows
+support for the Surface Book 3, on which the DTX device is not present
+in ACPI.
+
+Patchset: surface-sam
+---
+ drivers/platform/surface/Kconfig       |  4 ++
+ drivers/platform/surface/surface_dtx.c | 90 +++++++++++++++++++++++++-
+ 2 files changed, 93 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index 745f9d2eb6a7..dea313989b4c 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -127,6 +127,10 @@ config SURFACE_DTX
+ 	  behavior of this process, which includes the option to abort it in
+ 	  case the base is still in use or speed it up in case it is not.
+ 
++	  Note that this module can be built without support for the Surface
++	  Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
++	  some devices, specifically the Surface Book 3, will not be supported.
++
+ config SURFACE_GPE
+ 	tristate "Surface GPE/Lid Support Driver"
+ 	depends on DMI
+diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
+index a95adc1094aa..4bb5d286bf95 100644
+--- a/drivers/platform/surface/surface_dtx.c
++++ b/drivers/platform/surface/surface_dtx.c
+@@ -27,6 +27,7 @@
+ #include <linux/workqueue.h>
+ 
+ #include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
+ #include <linux/surface_aggregator/dtx.h>
+ 
+ 
+@@ -1194,7 +1195,94 @@ static struct platform_driver surface_dtx_platform_driver = {
+ 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ 	},
+ };
+-module_platform_driver(surface_dtx_platform_driver);
++
++
++/* -- SSAM device driver. --------------------------------------------------- */
++
++#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
++
++static int surface_dtx_ssam_probe(struct ssam_device *sdev)
++{
++	struct sdtx_device *ddev;
++
++	ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
++	if (IS_ERR(ddev))
++		return PTR_ERR(ddev);
++
++	ssam_device_set_drvdata(sdev, ddev);
++	return 0;
++}
++
++static void surface_dtx_ssam_remove(struct ssam_device *sdev)
++{
++	sdtx_device_destroy(ssam_device_get_drvdata(sdev));
++}
++
++static const struct ssam_device_id surface_dtx_ssam_match[] = {
++	{ SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
++
++static struct ssam_device_driver surface_dtx_ssam_driver = {
++	.probe = surface_dtx_ssam_probe,
++	.remove = surface_dtx_ssam_remove,
++	.match_table = surface_dtx_ssam_match,
++	.driver = {
++		.name = "surface_dtx",
++		.pm = &surface_dtx_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++static int ssam_dtx_driver_register(void)
++{
++	return ssam_device_driver_register(&surface_dtx_ssam_driver);
++}
++
++static void ssam_dtx_driver_unregister(void)
++{
++	ssam_device_driver_unregister(&surface_dtx_ssam_driver);
++}
++
++#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++static int ssam_dtx_driver_register(void)
++{
++	return 0;
++}
++
++static void ssam_dtx_driver_unregister(void)
++{
++}
++
++#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++
++/* -- Module setup. --------------------------------------------------------- */
++
++static int __init surface_dtx_init(void)
++{
++	int status;
++
++	status = ssam_dtx_driver_register();
++	if (status)
++		return status;
++
++	status = platform_driver_register(&surface_dtx_platform_driver);
++	if (status)
++		ssam_dtx_driver_unregister();
++
++	return status;
++}
++module_init(surface_dtx_init);
++
++static void __exit surface_dtx_exit(void)
++{
++	platform_driver_unregister(&surface_dtx_platform_driver);
++	ssam_dtx_driver_unregister();
++}
++module_exit(surface_dtx_exit);
+ 
+ MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+ MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
+-- 
+2.30.1
+
+From 21ef9d0ead48bb568c3898df5f5b0d99a21977b8 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Tue, 9 Feb 2021 02:55:31 +0100
+Subject: [PATCH] docs: driver-api: Add Surface DTX driver documentation
+
+Add documentation for the user-space interface of the Surface DTX
+(detachment system) driver, used on Microsoft Surface Book series
+devices.
+
+Patchset: surface-sam
+---
+ .../surface_aggregator/clients/dtx.rst        | 718 ++++++++++++++++++
+ .../surface_aggregator/clients/index.rst      |   1 +
+ MAINTAINERS                                   |   1 +
+ 3 files changed, 720 insertions(+)
+ create mode 100644 Documentation/driver-api/surface_aggregator/clients/dtx.rst
+
+diff --git a/Documentation/driver-api/surface_aggregator/clients/dtx.rst b/Documentation/driver-api/surface_aggregator/clients/dtx.rst
+new file mode 100644
+index 000000000000..e7e7c20007f0
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/clients/dtx.rst
+@@ -0,0 +1,718 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |__u16| replace:: :c:type:`__u16 <__u16>`
++.. |sdtx_event| replace:: :c:type:`struct sdtx_event <sdtx_event>`
++.. |sdtx_event_code| replace:: :c:type:`enum sdtx_event_code <sdtx_event_code>`
++.. |sdtx_base_info| replace:: :c:type:`struct sdtx_base_info <sdtx_base_info>`
++.. |sdtx_device_mode| replace:: :c:type:`struct sdtx_device_mode <sdtx_device_mode>`
++
++======================================================
++User-Space DTX (Clipboard Detachment System) Interface
++======================================================
++
++The ``surface_dtx`` driver is responsible for proper clipboard detachment
++and re-attachment handling. To this end, it provides the ``/dev/surface/dtx``
++device file, through which it can interface with a user-space daemon. This
++daemon is then ultimately responsible for determining and taking necessary
++actions, such as unmounting devices attached to the base,
++unloading/reloading the graphics-driver, user-notifications, etc.
++
++There are two basic communication principles used in this driver: Commands
++(in other parts of the documentation also referred to as requests) and
++events. Commands are sent to the EC and may have different implications in
++different contexts. Events are sent by the EC upon some internal state
++change. Commands are always driver-initiated, whereas events are always
++initiated by the EC.
++
++.. contents::
++
++Nomenclature
++============
++
++* **Clipboard:**
++  The detachable upper part of the Surface Book, housing the screen and CPU.
++
++* **Base:**
++  The lower part of the Surface Book from which the clipboard can be
++  detached, optionally (model dependent) housing the discrete GPU (dGPU).
++
++* **Latch:**
++  The mechanism keeping the clipboard attached to the base in normal
++  operation and allowing it to be detached when requested.
++
++* **Silently ignored commands:**
++  The command is accepted by the EC as a valid command and acknowledged
++  (following the standard communication protocol), but the EC does not act
++  upon it, i.e. ignores it.
++
++
++Detachment Process
++==================
++
++Warning: This part of the documentation is based on reverse engineering and
++testing and thus may contain errors or be incomplete.
++
++Latch States
++------------
++
++The latch mechanism has two major states: *open* and *closed*. In the
++*closed* state (default), the clipboard is secured to the base, whereas in
++the *open* state, the clipboard can be removed by a user.
++
++The latch can additionally be locked and, correspondingly, unlocked, which
++can influence the detachment procedure. Specifically, this locking mechanism
++is intended to prevent the dGPU, positioned in the base of the device, from
++being hot-unplugged while in use. More details can be found in the
++documentation for the detachment procedure below. By default, the latch is
++unlocked.
++
++Detachment Procedure
++--------------------
++
++Note that the detachment process is governed fully by the EC. The
++``surface_dtx`` driver only relays events from the EC to user-space and
++commands from user-space to the EC, i.e. it does not influence this process.
++
++The detachment process is started with the user pressing the *detach* button
++on the base of the device or executing the ``SDTX_IOCTL_LATCH_REQUEST`` IOCTL.
++Following that:
++
++1. The EC turns on the indicator led on the detach-button, sends a
++   *detach-request* event (``SDTX_EVENT_REQUEST``), and awaits further
++   instructions/commands. In case the latch is unlocked, the led will flash
++   green. If the latch has been locked, the led will be solid red.
++
++2. The event is, via the ``surface_dtx`` driver, relayed to user-space, where
++   an appropriate user-space daemon can handle it and send instructions back
++   to the EC via IOCTLs provided by this driver.
++
++3. The EC waits for instructions from user-space and acts according to them.
++   If the EC does not receive any instructions in a given period, it will
++   time out and continue as follows:
++
++   - If the latch is unlocked, the EC will open the latch and the clipboard
++     can be detached from the base. This is the same behavior as without
++     this driver or any user-space daemon. See the ``SDTX_IOCTL_LATCH_CONFIRM``
++     description below for more details on the follow-up behavior of the EC.
++
++   - If the latch is locked, the EC will *not* open the latch, meaning the
++     clipboard cannot be detached from the base. Furthermore, the EC sends
++     a cancel event (``SDTX_EVENT_CANCEL``) detailing this with the cancel
++     reason ``SDTX_DETACH_TIMEDOUT`` (see :ref:`events` for details).
++
++Valid responses by a user-space daemon to a detachment request event are:
++
++- Execute ``SDTX_IOCTL_LATCH_REQUEST``. This will immediately abort the
++  detachment process. Furthermore, the EC will send a detach-request event,
++  similar to the user pressing the detach-button to cancel said process (see
++  below).
++
++- Execute ``SDTX_IOCTL_LATCH_CONFIRM``. This will cause the EC to open the
++  latch, after which the user can separate clipboard and base.
++
++  As this changes the latch state, a *latch-status* event
++  (``SDTX_EVENT_LATCH_STATUS``) will be sent once the latch has been opened
++  successfully. If the EC fails to open the latch, e.g. due to hardware
++  error or low battery, a latch-cancel event (``SDTX_EVENT_CANCEL``) will be
++  sent with the cancel reason indicating the specific failure.
++
++  If the latch is currently locked, the latch will automatically be
++  unlocked before it is opened.
++
++- Execute ``SDTX_IOCTL_LATCH_HEARTBEAT``. This will reset the internal timeout.
++  No other actions will be performed, i.e. the detachment process will neither
++  be completed nor canceled, and the EC will still be waiting for further
++  responses.
++
++- Execute ``SDTX_IOCTL_LATCH_CANCEL``. This will abort the detachment process,
++  similar to ``SDTX_IOCTL_LATCH_REQUEST``, described above, or the button
++  press, described below. A *generic request* event (``SDTX_EVENT_REQUEST``)
++  is sent in response to this. In contrast to those, however, this command
++  does not trigger a new detachment process if none is currently in
++  progress.
++
++- Do nothing. The detachment process eventually times out as described in
++  point 3.
++
++See :ref:`ioctls` for more details on these responses.
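++
++As a rough illustration, the following user-space sketch acknowledges a
++pending detachment request: it sends a heartbeat while cleanup work is still
++running and then confirms the request. The already-open file descriptor and
++the cleanup step are assumptions of this example, not requirements of the
++interface.
++
++.. code-block:: c
++
++   /*
++    * Minimal sketch: daemon-side response to a detach-request event.
++    * Assumes an already-open file descriptor to the DTX device file.
++    */
++   #include <sys/ioctl.h>
++   #include <linux/surface_aggregator/dtx.h>
++
++   static int handle_detach_request(int fd)
++   {
++           /* Keep the EC waiting while cleanup work is still running. */
++           if (ioctl(fd, SDTX_IOCTL_LATCH_HEARTBEAT))
++                   return -1;
++
++           /* ... unmount file systems, stop base device users, etc. ... */
++
++           /* Cleanup done: ask the EC to open the latch. */
++           return ioctl(fd, SDTX_IOCTL_LATCH_CONFIRM);
++   }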
++
++It is important to note that, if the user presses the detach button at any
++point when a detachment operation is in progress (i.e. after the EC has sent
++the initial *detach-request* event (``SDTX_EVENT_REQUEST``) and before it
++has received the corresponding response concluding the process), the
++detachment process is canceled at the EC level and an identical event is sent.
++Thus a *detach-request* event, by itself, does not signal the start of the
++detachment process.
++
++The detachment process may further be canceled by the EC due to hardware
++failures or a low clipboard battery. This is done via a cancel event
++(``SDTX_EVENT_CANCEL``) with the corresponding cancel reason.
++
++
++User-Space Interface Documentation
++==================================
++
++Error Codes and Status Values
++-----------------------------
++
++Error and status codes are divided into different categories, which can be
++used to determine if the status code is an error, and, if it is, the
++severity and type of that error. The current categories are:
++
++.. flat-table:: Overview of Status/Error Categories.
++   :widths: 2 1 3
++   :header-rows: 1
++
++   * - Name
++     - Value
++     - Short Description
++
++   * - ``STATUS``
++     - ``0x0000``
++     - Non-error status codes.
++
++   * - ``RUNTIME_ERROR``
++     - ``0x1000``
++     - Non-critical runtime errors.
++
++   * - ``HARDWARE_ERROR``
++     - ``0x2000``
++     - Critical hardware failures.
++
++   * - ``UNKNOWN``
++     - ``0xF000``
++     - Unknown error codes.
++
++Other categories are reserved for future use. The ``SDTX_CATEGORY()`` macro
++can be used to determine the category of any status value. The
++``SDTX_SUCCESS()`` macro can be used to check if the status value is a
++success value (``SDTX_CATEGORY_STATUS``) or if it indicates a failure.
++
++Unknown status or error codes sent by the EC are assigned to the ``UNKNOWN``
++category by the driver and may be implemented via their own code in the
++future.
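++
++For illustration, a user-space consumer could classify status values with
++these macros as follows. This is a minimal sketch; the macros and category
++constants are provided by ``include/uapi/linux/surface_aggregator/dtx.h``.
++
++.. code-block:: c
++
++   #include <stdio.h>
++   #include <linux/surface_aggregator/dtx.h>
++
++   /* Print a human-readable classification of a status/error value. */
++   static void print_status(unsigned int value)
++   {
++           if (SDTX_SUCCESS(value)) {
++                   printf("status: %#06x\n", value);
++                   return;
++           }
++
++           switch (SDTX_CATEGORY(value)) {
++           case SDTX_CATEGORY_RUNTIME_ERROR:
++                   printf("runtime error: %#06x\n", value);
++                   break;
++           case SDTX_CATEGORY_HARDWARE_ERROR:
++                   printf("hardware error: %#06x\n", value);
++                   break;
++           default:
++                   printf("unknown error: %#06x\n", value);
++                   break;
++           }
++   }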
++
++Currently used error codes are:
++
++.. flat-table:: Overview of Error Codes.
++   :widths: 2 1 1 3
++   :header-rows: 1
++
++   * - Name
++     - Category
++     - Value
++     - Short Description
++
++   * - ``SDTX_DETACH_NOT_FEASIBLE``
++     - ``RUNTIME``
++     - ``0x1001``
++     - Detachment not feasible due to low clipboard battery.
++
++   * - ``SDTX_DETACH_TIMEDOUT``
++     - ``RUNTIME``
++     - ``0x1002``
++     - Detachment process timed out while the latch was locked.
++
++   * - ``SDTX_ERR_FAILED_TO_OPEN``
++     - ``HARDWARE``
++     - ``0x2001``
++     - Failed to open latch.
++
++   * - ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``
++     - ``HARDWARE``
++     - ``0x2002``
++     - Failed to keep latch open.
++
++   * - ``SDTX_ERR_FAILED_TO_CLOSE``
++     - ``HARDWARE``
++     - ``0x2003``
++     - Failed to close latch.
++
++Other error codes are reserved for future use. Non-error status codes may
++overlap and are generally only unique within their use-case:
++
++.. flat-table:: Latch Status Codes.
++   :widths: 2 1 1 3
++   :header-rows: 1
++
++   * - Name
++     - Category
++     - Value
++     - Short Description
++
++   * - ``SDTX_LATCH_CLOSED``
++     - ``STATUS``
++     - ``0x0000``
++     - Latch is closed/has been closed.
++
++   * - ``SDTX_LATCH_OPENED``
++     - ``STATUS``
++     - ``0x0001``
++     - Latch is open/has been opened.
++
++.. flat-table:: Base State Codes.
++   :widths: 2 1 1 3
++   :header-rows: 1
++
++   * - Name
++     - Category
++     - Value
++     - Short Description
++
++   * - ``SDTX_BASE_DETACHED``
++     - ``STATUS``
++     - ``0x0000``
++     - Base has been detached/is not present.
++
++   * - ``SDTX_BASE_ATTACHED``
++     - ``STATUS``
++     - ``0x0001``
++     - Base has been attached/is present.
++
++Again, other codes are reserved for future use.
++
++.. _events:
++
++Events
++------
++
++Events can be received by reading from the device file. They are disabled by
++default and have to be enabled by executing ``SDTX_IOCTL_EVENTS_ENABLE``
++first. All events follow the layout prescribed by |sdtx_event|. Specific
++event types can be identified by their event code, described in
++|sdtx_event_code|. Note that other event codes are reserved for future use,
++thus an event parser must be able to handle any unknown/unsupported event
++types gracefully, by relying on the payload length given in the event header.
++
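++The following user-space sketch enables events and parses the result of a
++single ``read()``, using the payload length from the event header to skip
++over unknown event types. The device path (``/dev/surface/dtx``) and the
++buffer size are assumptions of this example; the field names follow
++|sdtx_event| as defined in the UAPI header.
++
++.. code-block:: c
++
++   #include <fcntl.h>
++   #include <stdio.h>
++   #include <string.h>
++   #include <sys/ioctl.h>
++   #include <unistd.h>
++   #include <linux/surface_aggregator/dtx.h>
++
++   int main(void)
++   {
++           unsigned char buf[512];
++           ssize_t len, pos = 0;
++           int fd;
++
++           fd = open("/dev/surface/dtx", O_RDWR);
++           if (fd < 0)
++                   return 1;
++
++           /* Events are disabled by default, enable them first. */
++           if (ioctl(fd, SDTX_IOCTL_EVENTS_ENABLE))
++                   return 1;
++
++           /* A single read() may return multiple complete events. */
++           len = read(fd, buf, sizeof(buf));
++
++           while (pos + (ssize_t)sizeof(struct sdtx_event) <= len) {
++                   struct sdtx_event ev;
++
++                   memcpy(&ev, buf + pos, sizeof(ev));
++                   printf("event: code=%u, payload=%u bytes\n",
++                          (unsigned int)ev.code, (unsigned int)ev.length);
++
++                   /* Skip header plus payload, even for unknown codes. */
++                   pos += sizeof(ev) + ev.length;
++           }
++
++           close(fd);
++           return 0;
++   }
++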
++Currently provided event types are:
++
++.. flat-table:: Overview of DTX events.
++   :widths: 2 1 1 3
++   :header-rows: 1
++
++   * - Name
++     - Code
++     - Payload
++     - Short Description
++
++   * - ``SDTX_EVENT_REQUEST``
++     - ``1``
++     - ``0`` bytes
++     - Detachment process initiated/aborted.
++
++   * - ``SDTX_EVENT_CANCEL``
++     - ``2``
++     - ``2`` bytes
++     - EC canceled detachment process.
++
++   * - ``SDTX_EVENT_BASE_CONNECTION``
++     - ``3``
++     - ``4`` bytes
++     - Base connection state changed.
++
++   * - ``SDTX_EVENT_LATCH_STATUS``
++     - ``4``
++     - ``2`` bytes
++     - Latch status changed.
++
++   * - ``SDTX_EVENT_DEVICE_MODE``
++     - ``5``
++     - ``2`` bytes
++     - Device mode changed.
++
++Individual events in more detail:
++
++``SDTX_EVENT_REQUEST``
++^^^^^^^^^^^^^^^^^^^^^^
++
++Sent when a detachment process is started or, if in progress, aborted by the
++user, either via a detach button press or a detach request
++(``SDTX_IOCTL_LATCH_REQUEST``) being sent from user-space.
++
++Does not have any payload.
++
++``SDTX_EVENT_CANCEL``
++^^^^^^^^^^^^^^^^^^^^^
++
++Sent when a detachment process is canceled by the EC due to unfulfilled
++preconditions (e.g. clipboard battery too low to detach) or hardware
++failure. The reason for cancellation is given in the event payload detailed
++below and can be one of
++
++* ``SDTX_DETACH_TIMEDOUT``: Detachment timed out while the latch was locked.
++  The latch has neither been opened nor unlocked.
++
++* ``SDTX_DETACH_NOT_FEASIBLE``: Detachment not feasible due to low clipboard
++  battery.
++
++* ``SDTX_ERR_FAILED_TO_OPEN``: Could not open the latch (hardware failure).
++
++* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``: Could not keep the latch open (hardware
++  failure).
++
++* ``SDTX_ERR_FAILED_TO_CLOSE``: Could not close the latch (hardware failure).
++
++Other error codes in this context are reserved for future use.
++
++These codes can be classified via the ``SDTX_CATEGORY()`` macro to discern
++between critical hardware errors (``SDTX_CATEGORY_HARDWARE_ERROR``) or
++runtime errors (``SDTX_CATEGORY_RUNTIME_ERROR``), the latter of which may
++happen during normal operation if certain preconditions for detachment are
++not given.
++
++.. flat-table:: Detachment Cancel Event Payload
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - ``reason``
++     - |__u16|
++     - Reason for cancellation.
++
++``SDTX_EVENT_BASE_CONNECTION``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Sent when the base connection state has changed, i.e. when the base has been
++attached, detached, or detachment has become infeasible due to low clipboard
++battery. The new state and, if a base is connected, ID of the base is
++provided as payload of type |sdtx_base_info| with its layout presented
++below:
++
++.. flat-table:: Base-Connection-Change Event Payload
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - ``state``
++     - |__u16|
++     - Base connection state.
++
++   * - ``base_id``
++     - |__u16|
++     - Type of base connected (zero if none).
++
++Possible values for ``state`` are:
++
++* ``SDTX_BASE_DETACHED``,
++* ``SDTX_BASE_ATTACHED``, and
++* ``SDTX_DETACH_NOT_FEASIBLE``.
++
++Other values are reserved for future use.
++
++``SDTX_EVENT_LATCH_STATUS``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Sent when the latch status has changed, i.e. when the latch has been opened,
++closed, or an error occurred. The current status is provided as payload:
++
++.. flat-table:: Latch-Status-Change Event Payload
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - ``status``
++     - |__u16|
++     - Latch status.
++
++Possible values for ``status`` are:
++
++* ``SDTX_LATCH_CLOSED``,
++* ``SDTX_LATCH_OPENED``,
++* ``SDTX_ERR_FAILED_TO_OPEN``,
++* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``, and
++* ``SDTX_ERR_FAILED_TO_CLOSE``.
++
++Other values are reserved for future use.
++
++``SDTX_EVENT_DEVICE_MODE``
++^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Sent when the device mode has changed. The new device mode is provided as
++payload:
++
++.. flat-table:: Device-Mode-Change Event Payload
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - ``mode``
++     - |__u16|
++     - Device operation mode.
++
++Possible values for ``mode`` are:
++
++* ``SDTX_DEVICE_MODE_TABLET``,
++* ``SDTX_DEVICE_MODE_LAPTOP``, and
++* ``SDTX_DEVICE_MODE_STUDIO``.
++
++Other values are reserved for future use.
++
++.. _ioctls:
++
++IOCTLs
++------
++
++The following IOCTLs are provided:
++
++.. flat-table:: Overview of DTX IOCTLs
++   :widths: 1 1 1 1 4
++   :header-rows: 1
++
++   * - Type
++     - Number
++     - Direction
++     - Name
++     - Description
++
++   * - ``0xA5``
++     - ``0x21``
++     - ``-``
++     - ``EVENTS_ENABLE``
++     - Enable events for the current file descriptor.
++
++   * - ``0xA5``
++     - ``0x22``
++     - ``-``
++     - ``EVENTS_DISABLE``
++     - Disable events for the current file descriptor.
++
++   * - ``0xA5``
++     - ``0x23``
++     - ``-``
++     - ``LATCH_LOCK``
++     - Lock the latch.
++
++   * - ``0xA5``
++     - ``0x24``
++     - ``-``
++     - ``LATCH_UNLOCK``
++     - Unlock the latch.
++
++   * - ``0xA5``
++     - ``0x25``
++     - ``-``
++     - ``LATCH_REQUEST``
++     - Request clipboard detachment.
++
++   * - ``0xA5``
++     - ``0x26``
++     - ``-``
++     - ``LATCH_CONFIRM``
++     - Confirm clipboard detachment request.
++
++   * - ``0xA5``
++     - ``0x27``
++     - ``-``
++     - ``LATCH_HEARTBEAT``
++     - Send heartbeat signal to EC.
++
++   * - ``0xA5``
++     - ``0x28``
++     - ``-``
++     - ``LATCH_CANCEL``
++     - Cancel detachment process.
++
++   * - ``0xA5``
++     - ``0x29``
++     - ``R``
++     - ``GET_BASE_INFO``
++     - Get current base/connection information.
++
++   * - ``0xA5``
++     - ``0x2A``
++     - ``R``
++     - ``GET_DEVICE_MODE``
++     - Get current device operation mode.
++
++   * - ``0xA5``
++     - ``0x2B``
++     - ``R``
++     - ``GET_LATCH_STATUS``
++     - Get current device latch status.
++
++``SDTX_IOCTL_EVENTS_ENABLE``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x21)``.
++
++Enable events for the current file descriptor. Events can be obtained by
++reading from the device, if enabled. Events are disabled by default.
++
++``SDTX_IOCTL_EVENTS_DISABLE``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x22)``.
++
++Disable events for the current file descriptor. Events can be obtained by
++reading from the device, if enabled. Events are disabled by default.
++
++``SDTX_IOCTL_LATCH_LOCK``
++^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x23)``.
++
++Locks the latch, causing the detachment procedure to abort without opening
++the latch on timeout. The latch is unlocked by default. This command will be
++silently ignored if the latch is already locked.
++
++``SDTX_IOCTL_LATCH_UNLOCK``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x24)``.
++
++Unlocks the latch, causing the detachment procedure to open the latch on
++timeout. The latch is unlocked by default. This command will not open the
++latch when sent during an ongoing detachment process. It will be silently
++ignored if the latch is already unlocked.
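++
++For illustration, a session daemon might toggle the latch lock as follows.
++This is a minimal sketch; when to lock or unlock is purely a user-space
++policy decision, and an already-open file descriptor is assumed.
++
++.. code-block:: c
++
++   #include <sys/ioctl.h>
++   #include <linux/surface_aggregator/dtx.h>
++
++   /* Lock the latch while detaching should be prevented, unlock otherwise. */
++   static int set_latch_lock(int fd, int lock)
++   {
++           return ioctl(fd, lock ? SDTX_IOCTL_LATCH_LOCK
++                                 : SDTX_IOCTL_LATCH_UNLOCK);
++   }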
++
++``SDTX_IOCTL_LATCH_REQUEST``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x25)``.
++
++Generic latch request. Behavior depends on the context: if no detachment
++process is active, detachment is requested. Otherwise, the currently active
++detachment process will be aborted.
++
++If a detachment process is canceled by this operation, a generic detachment
++request event (``SDTX_EVENT_REQUEST``) will be sent.
++
++This essentially behaves the same as a detachment button press.
++
++``SDTX_IOCTL_LATCH_CONFIRM``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x26)``.
++
++Acknowledges and confirms a latch request. If sent during an ongoing
++detachment process, this command causes the latch to be opened immediately.
++The latch will also be opened if it has been locked. In this case, the latch
++lock is reset to the unlocked state.
++
++This command will be silently ignored if there is currently no detachment
++procedure in progress.
++
++``SDTX_IOCTL_LATCH_HEARTBEAT``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x27)``.
++
++Sends a heartbeat, essentially resetting the detachment timeout. This
++command can be used to keep the detachment process alive while work required
++for the detachment to succeed is still in progress.
++
++This command will be silently ignored if there is currently no detachment
++procedure in progress.
++
++``SDTX_IOCTL_LATCH_CANCEL``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x28)``.
++
++Cancels detachment in progress (if any). If a detachment process is canceled
++by this operation, a generic detachment request event
++(``SDTX_EVENT_REQUEST``) will be sent.
++
++This command will be silently ignored if there is currently no detachment
++procedure in progress.
++
++``SDTX_IOCTL_GET_BASE_INFO``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IOR(0xA5, 0x29, struct sdtx_base_info)``.
++
++Get the current base connection state (i.e. attached/detached) and the type
++of the base connected to the clipboard. This command essentially provides
++a way to query the information provided by the base connection change event
++(``SDTX_EVENT_BASE_CONNECTION``).
++
++Possible values for ``struct sdtx_base_info.state`` are:
++
++* ``SDTX_BASE_DETACHED``,
++* ``SDTX_BASE_ATTACHED``, and
++* ``SDTX_DETACH_NOT_FEASIBLE``.
++
++Other values are reserved for future use.
++
++``SDTX_IOCTL_GET_DEVICE_MODE``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IOR(0xA5, 0x2A, __u16)``.
++
++Returns the device operation mode, indicating if and how the base is
++attached to the clipboard. This command essentially provides a way to
++query the information provided by the device mode change event
++(``SDTX_EVENT_DEVICE_MODE``).
++
++Returned values are:
++
++* ``SDTX_DEVICE_MODE_LAPTOP``
++* ``SDTX_DEVICE_MODE_TABLET``
++* ``SDTX_DEVICE_MODE_STUDIO``
++
++See |sdtx_device_mode| for details. Other values are reserved for future
++use.
++
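++A minimal user-space query might look as follows; the device path
++(``/dev/surface/dtx``) is an assumption of this example.
++
++.. code-block:: c
++
++   #include <fcntl.h>
++   #include <stdio.h>
++   #include <sys/ioctl.h>
++   #include <unistd.h>
++   #include <linux/types.h>
++   #include <linux/surface_aggregator/dtx.h>
++
++   int main(void)
++   {
++           __u16 mode;
++           int fd;
++
++           fd = open("/dev/surface/dtx", O_RDWR);
++           if (fd < 0)
++                   return 1;
++
++           if (ioctl(fd, SDTX_IOCTL_GET_DEVICE_MODE, &mode)) {
++                   close(fd);
++                   return 1;
++           }
++
++           printf("device mode: %s\n",
++                  mode == SDTX_DEVICE_MODE_LAPTOP ? "laptop" :
++                  mode == SDTX_DEVICE_MODE_TABLET ? "tablet" :
++                  mode == SDTX_DEVICE_MODE_STUDIO ? "studio" : "unknown");
++
++           close(fd);
++           return 0;
++   }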
++
++``SDTX_IOCTL_GET_LATCH_STATUS``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IOR(0xA5, 0x2B, __u16)``.
++
++Get the current latch status or (presumably) the last error encountered when
++trying to open/close the latch. This command essentially provides a way
++to query the information provided by the latch status change event
++(``SDTX_EVENT_LATCH_STATUS``).
++
++Returned values are:
++
++* ``SDTX_LATCH_CLOSED``,
++* ``SDTX_LATCH_OPENED``,
++* ``SDTX_ERR_FAILED_TO_OPEN``,
++* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``, and
++* ``SDTX_ERR_FAILED_TO_CLOSE``.
++
++Other values are reserved for future use.
++
++A Note on Base IDs
++------------------
++
++Base types/IDs provided via ``SDTX_EVENT_BASE_CONNECTION`` or
++``SDTX_IOCTL_GET_BASE_INFO`` are directly forwarded from the EC in the lower
++byte of the combined |__u16| value, with the driver storing the EC type from
++which this ID comes in the high byte (without this, base IDs over different
++types of ECs may be overlapping).
++
++The ``SDTX_DEVICE_TYPE()`` macro can be used to determine the EC device
++type. This can be one of
++
++* ``SDTX_DEVICE_TYPE_HID``, for Surface Aggregator Module over HID, and
++
++* ``SDTX_DEVICE_TYPE_SSH``, for Surface Aggregator Module over Surface Serial
++  Hub.
++
++Note that currently only the ``SSH`` type EC is supported; the ``HID`` type
++is reserved for future use.
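++
++The following sketch queries the base information and splits the combined ID
++into the EC device type and the raw EC base ID. This is a minimal example;
++the device path (``/dev/surface/dtx``) is an assumption.
++
++.. code-block:: c
++
++   #include <fcntl.h>
++   #include <stdio.h>
++   #include <sys/ioctl.h>
++   #include <unistd.h>
++   #include <linux/surface_aggregator/dtx.h>
++
++   int main(void)
++   {
++           struct sdtx_base_info info;
++           int fd;
++
++           fd = open("/dev/surface/dtx", O_RDWR);
++           if (fd < 0)
++                   return 1;
++
++           if (ioctl(fd, SDTX_IOCTL_GET_BASE_INFO, &info)) {
++                   close(fd);
++                   return 1;
++           }
++
++           if (info.state == SDTX_BASE_ATTACHED) {
++                   const char *type = SDTX_DEVICE_TYPE(info.base_id) ==
++                                      SDTX_DEVICE_TYPE_SSH ? "SSH" : "HID";
++
++                   /* Low byte: ID as reported by the EC; high byte: EC type. */
++                   printf("base attached: type=%s, id=%#04x\n", type,
++                          (unsigned int)(info.base_id & 0xff));
++           } else {
++                   printf("no base attached\n");
++           }
++
++           close(fd);
++           return 0;
++   }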
++
++Structures and Enums
++--------------------
++
++.. kernel-doc:: include/uapi/linux/surface_aggregator/dtx.h
++
++API Users
++=========
++
++A user-space daemon utilizing this API can be found at
++https://github.com/linux-surface/surface-dtx-daemon.
+diff --git a/Documentation/driver-api/surface_aggregator/clients/index.rst b/Documentation/driver-api/surface_aggregator/clients/index.rst
+index 3ccabce23271..98ea9946b8a2 100644
+--- a/Documentation/driver-api/surface_aggregator/clients/index.rst
++++ b/Documentation/driver-api/surface_aggregator/clients/index.rst
+@@ -11,6 +11,7 @@ This is the documentation for client drivers themselves. Refer to
+    :maxdepth: 1
+ 
+    cdev
++   dtx
+    san
+ 
+ .. only::  subproject and html
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 8ea459fc56f4..ba2dfafeb28c 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11790,6 +11790,7 @@ MICROSOFT SURFACE DTX DRIVER
+ M:	Maximilian Luz <luzmaximilian@gmail.com>
+ L:	platform-driver-x86@vger.kernel.org
+ S:	Maintained
++F:	Documentation/driver-api/surface_aggregator/clients/dtx.rst
+ F:	drivers/platform/surface/surface_dtx.c
+ F:	include/uapi/linux/surface_aggregator/dtx.h
+ 
+-- 
+2.30.1
+
+From 1699b3a1c4fadac21f7787d3237eaa636e501a83 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Thu, 11 Feb 2021 20:02:57 +0100
+Subject: [PATCH] input: Add bus ID for Surface Aggregator Module
+
+Add a bus identifier for the Surface System Aggregator Module, which
+connects various integrated HID devices on Microsoft Surface models via
+a dedicated HID transport protocol.
+
+Patchset: surface-sam
+---
+ include/uapi/linux/input.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
+index 9a61c28ed3ae..3e81ea3d7df2 100644
+--- a/include/uapi/linux/input.h
++++ b/include/uapi/linux/input.h
+@@ -271,6 +271,7 @@ struct input_mask {
+ #define BUS_RMI			0x1D
+ #define BUS_CEC			0x1E
+ #define BUS_INTEL_ISHTP		0x1F
++#define BUS_SURFACE_AGGREGATOR	0x20
+ 
+ /*
+  * MT_TOOL types
+-- 
+2.30.1
+
+From c000bc102bd387084452507a7ce24e01fad471bb Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Thu, 11 Feb 2021 20:08:50 +0100
+Subject: [PATCH] HID: Add support for Surface Aggregator Module HID transport
+
+Add a HID transport driver to support integrated HID devices on newer
+Microsoft Surface models (specifically 7th-generation, i.e. Surface
+Laptop 3, Surface Book 3, and later).
+
+On those models, the internal keyboard and touchpad (as well as some
+other HID devices with currently unknown function) are connected via the
+generic HID subsystem (TC=0x15) of the Surface System Aggregator Module
+(SSAM). This subsystem provides a generic HID transport layer, support
+for which is implemented by this driver.
+
+Patchset: surface-sam
+---
+ MAINTAINERS                                |   7 +
+ drivers/hid/Kconfig                        |   2 +
+ drivers/hid/Makefile                       |   2 +
+ drivers/hid/surface-hid/Kconfig            |  28 +++
+ drivers/hid/surface-hid/Makefile           |   6 +
+ drivers/hid/surface-hid/surface_hid.c      | 256 +++++++++++++++++++
+ drivers/hid/surface-hid/surface_hid_core.c | 272 +++++++++++++++++++++
+ drivers/hid/surface-hid/surface_hid_core.h |  77 ++++++
+ 8 files changed, 650 insertions(+)
+ create mode 100644 drivers/hid/surface-hid/Kconfig
+ create mode 100644 drivers/hid/surface-hid/Makefile
+ create mode 100644 drivers/hid/surface-hid/surface_hid.c
+ create mode 100644 drivers/hid/surface-hid/surface_hid_core.c
+ create mode 100644 drivers/hid/surface-hid/surface_hid_core.h
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index ba2dfafeb28c..d4ebe9f7ae69 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11809,6 +11809,13 @@ S:	Maintained
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
+ F:	drivers/platform/surface/
+ 
++MICROSOFT SURFACE HID TRANSPORT DRIVER
++M:	Maximilian Luz <luzmaximilian@gmail.com>
++L:	linux-input@vger.kernel.org
++L:	platform-driver-x86@vger.kernel.org
++S:	Maintained
++F:	drivers/hid/surface-hid/
++
+ MICROSOFT SURFACE PRO 3 BUTTON DRIVER
+ M:	Chen Yu <yu.c.chen@intel.com>
+ L:	platform-driver-x86@vger.kernel.org
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 09fa75a2b289..e82474897919 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -1187,4 +1187,6 @@ source "drivers/hid/intel-ish-hid/Kconfig"
+ 
+ source "drivers/hid/amd-sfh-hid/Kconfig"
+ 
++source "drivers/hid/surface-hid/Kconfig"
++
+ endmenu
+diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
+index 014d21fe7dac..98d6b9f666ec 100644
+--- a/drivers/hid/Makefile
++++ b/drivers/hid/Makefile
+@@ -144,3 +144,5 @@ obj-$(CONFIG_INTEL_ISH_HID)	+= intel-ish-hid/
+ obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER)	+= intel-ish-hid/
+ 
+ obj-$(CONFIG_AMD_SFH_HID)       += amd-sfh-hid/
++
++obj-$(CONFIG_SURFACE_HID_CORE)  += surface-hid/
+diff --git a/drivers/hid/surface-hid/Kconfig b/drivers/hid/surface-hid/Kconfig
+new file mode 100644
+index 000000000000..642c7f0e64fe
+--- /dev/null
++++ b/drivers/hid/surface-hid/Kconfig
+@@ -0,0 +1,28 @@
++# SPDX-License-Identifier: GPL-2.0+
++menu "Surface System Aggregator Module HID support"
++	depends on SURFACE_AGGREGATOR
++	depends on INPUT
++
++config SURFACE_HID
++	tristate "HID transport driver for Surface System Aggregator Module"
++	depends on SURFACE_AGGREGATOR_REGISTRY
++	select SURFACE_HID_CORE
++	help
++	  Driver to support integrated HID devices on newer Microsoft Surface
++	  models.
++
++	  This driver provides support for the HID transport protocol provided
++	  by the Surface Aggregator Module (i.e. the embedded controller) on
++	  7th-generation Microsoft Surface devices, i.e. Surface Book 3 and
++	  Surface Laptop 3. On those models, it is mainly used to connect the
++	  integrated touchpad and keyboard.
++
++	  Say M or Y here, if you want support for integrated HID devices, i.e.
++	  integrated touchpad and keyboard, on 7th generation Microsoft Surface
++	  models.
++
++endmenu
++
++config SURFACE_HID_CORE
++	tristate
++	select HID
+diff --git a/drivers/hid/surface-hid/Makefile b/drivers/hid/surface-hid/Makefile
+new file mode 100644
+index 000000000000..62fc04632d3d
+--- /dev/null
++++ b/drivers/hid/surface-hid/Makefile
+@@ -0,0 +1,6 @@
++# SPDX-License-Identifier: GPL-2.0+
++#
++# Makefile - Surface System Aggregator Module (SSAM) HID transport driver.
++#
++obj-$(CONFIG_SURFACE_HID_CORE)	+= surface_hid_core.o
++obj-$(CONFIG_SURFACE_HID)	+= surface_hid.o
+diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
+new file mode 100644
+index 000000000000..e4477c328536
+--- /dev/null
++++ b/drivers/hid/surface-hid/surface_hid.c
+@@ -0,0 +1,256 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface System Aggregator Module (SSAM) HID transport driver for the
++ * generic HID interface (HID/TC=0x15 subsystem). Provides support for
++ * integrated HID devices on Surface Laptop 3, Book 3, and later.
++ *
++ * Copyright (C) 2019-2021 Blaž Hrastnik <blaz@mxxn.io>,
++ *                         Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/hid.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
++
++#include "surface_hid_core.h"
++
++
++/* -- SAM interface. -------------------------------------------------------- */
++
++struct surface_hid_buffer_slice {
++	__u8 entry;
++	__le32 offset;
++	__le32 length;
++	__u8 end;
++	__u8 data[];
++} __packed;
++
++static_assert(sizeof(struct surface_hid_buffer_slice) == 10);
++
++enum surface_hid_cid {
++	SURFACE_HID_CID_OUTPUT_REPORT      = 0x01,
++	SURFACE_HID_CID_GET_FEATURE_REPORT = 0x02,
++	SURFACE_HID_CID_SET_FEATURE_REPORT = 0x03,
++	SURFACE_HID_CID_GET_DESCRIPTOR     = 0x04,
++};
++
++static int ssam_hid_get_descriptor(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len)
++{
++	u8 buffer[sizeof(struct surface_hid_buffer_slice) + 0x76];
++	struct surface_hid_buffer_slice *slice;
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++	u32 buffer_len, offset, length;
++	int status;
++
++	/*
++	 * Note: The 0x76 above has been chosen because that's what's used by
++	 * the Windows driver. Together with the header, this leads to a 128
++	 * byte payload in total.
++	 */
++
++	buffer_len = ARRAY_SIZE(buffer) - sizeof(struct surface_hid_buffer_slice);
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.command_id = SURFACE_HID_CID_GET_DESCRIPTOR;
++	rqst.instance_id = shid->uid.instance;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(struct surface_hid_buffer_slice);
++	rqst.payload = buffer;
++
++	rsp.capacity = ARRAY_SIZE(buffer);
++	rsp.pointer = buffer;
++
++	slice = (struct surface_hid_buffer_slice *)buffer;
++	slice->entry = entry;
++	slice->end = 0;
++
++	offset = 0;
++	length = buffer_len;
++
++	while (!slice->end && offset < len) {
++		put_unaligned_le32(offset, &slice->offset);
++		put_unaligned_le32(length, &slice->length);
++
++		rsp.length = 0;
++
++		status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp,
++				    sizeof(*slice));
++		if (status)
++			return status;
++
++		offset = get_unaligned_le32(&slice->offset);
++		length = get_unaligned_le32(&slice->length);
++
++		/* Don't mess stuff up in case we receive garbage. */
++		if (length > buffer_len || offset > len)
++			return -EPROTO;
++
++		if (offset + length > len)
++			length = len - offset;
++
++		memcpy(buf + offset, &slice->data[0], length);
++
++		offset += length;
++		length = buffer_len;
++	}
++
++	if (offset != len) {
++		dev_err(shid->dev, "unexpected descriptor length: got %u, expected %zu\n",
++			offset, len);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++static int ssam_hid_set_raw_report(struct surface_hid_device *shid, u8 rprt_id, bool feature,
++				   u8 *buf, size_t len)
++{
++	struct ssam_request rqst;
++	u8 cid;
++
++	if (feature)
++		cid = SURFACE_HID_CID_SET_FEATURE_REPORT;
++	else
++		cid = SURFACE_HID_CID_OUTPUT_REPORT;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.instance_id = shid->uid.instance;
++	rqst.command_id = cid;
++	rqst.flags = 0;
++	rqst.length = len;
++	rqst.payload = buf;
++
++	buf[0] = rprt_id;
++
++	return ssam_retry(ssam_request_sync, shid->ctrl, &rqst, NULL);
++}
++
++static int ssam_hid_get_raw_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
++{
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.instance_id = shid->uid.instance;
++	rqst.command_id = SURFACE_HID_CID_GET_FEATURE_REPORT;
++	rqst.flags = 0;
++	rqst.length = sizeof(rprt_id);
++	rqst.payload = &rprt_id;
++
++	rsp.capacity = len;
++	rsp.length = 0;
++	rsp.pointer = buf;
++
++	return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(rprt_id));
++}
++
++static u32 ssam_hid_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
++{
++	struct surface_hid_device *shid = container_of(nf, struct surface_hid_device, notif);
++	int status;
++
++	if (event->command_id != 0x00)
++		return 0;
++
++	status = hid_input_report(shid->hid, HID_INPUT_REPORT, (u8 *)&event->data[0],
++				  event->length, 0);
++
++	return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++}
++
++
++/* -- Transport driver. ----------------------------------------------------- */
++
++static int shid_output_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
++{
++	int status;
++
++	status = ssam_hid_set_raw_report(shid, rprt_id, false, buf, len);
++	return status >= 0 ? len : status;
++}
++
++static int shid_get_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
++{
++	int status;
++
++	status = ssam_hid_get_raw_report(shid, rprt_id, buf, len);
++	return status >= 0 ? len : status;
++}
++
++static int shid_set_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
++{
++	int status;
++
++	status = ssam_hid_set_raw_report(shid, rprt_id, true, buf, len);
++	return status >= 0 ? len : status;
++}
++
++
++/* -- Driver setup. --------------------------------------------------------- */
++
++static int surface_hid_probe(struct ssam_device *sdev)
++{
++	struct surface_hid_device *shid;
++
++	shid = devm_kzalloc(&sdev->dev, sizeof(*shid), GFP_KERNEL);
++	if (!shid)
++		return -ENOMEM;
++
++	shid->dev = &sdev->dev;
++	shid->ctrl = sdev->ctrl;
++	shid->uid = sdev->uid;
++
++	shid->notif.base.priority = 1;
++	shid->notif.base.fn = ssam_hid_event_fn;
++	shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG;
++	shid->notif.event.id.target_category = sdev->uid.category;
++	shid->notif.event.id.instance = sdev->uid.instance;
++	shid->notif.event.mask = SSAM_EVENT_MASK_STRICT;
++	shid->notif.event.flags = 0;
++
++	shid->ops.get_descriptor = ssam_hid_get_descriptor;
++	shid->ops.output_report = shid_output_report;
++	shid->ops.get_feature_report = shid_get_feature_report;
++	shid->ops.set_feature_report = shid_set_feature_report;
++
++	ssam_device_set_drvdata(sdev, shid);
++	return surface_hid_device_add(shid);
++}
++
++static void surface_hid_remove(struct ssam_device *sdev)
++{
++	surface_hid_device_destroy(ssam_device_get_drvdata(sdev));
++}
++
++static const struct ssam_device_id surface_hid_match[] = {
++	{ SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, surface_hid_match);
++
++static struct ssam_device_driver surface_hid_driver = {
++	.probe = surface_hid_probe,
++	.remove = surface_hid_remove,
++	.match_table = surface_hid_match,
++	.driver = {
++		.name = "surface_hid",
++		.pm = &surface_hid_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_ssam_device_driver(surface_hid_driver);
++
++MODULE_AUTHOR("Blaž Hrastnik <blaz@mxxn.io>");
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("HID transport driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
+new file mode 100644
+index 000000000000..2cadb8013529
+--- /dev/null
++++ b/drivers/hid/surface-hid/surface_hid_core.c
+@@ -0,0 +1,272 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Common/core components for the Surface System Aggregator Module (SSAM) HID
++ * transport driver. Provides support for integrated HID devices on Microsoft
++ * Surface models.
++ *
++ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/hid.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/usb/ch9.h>
++
++#include <linux/surface_aggregator/controller.h>
++
++#include "surface_hid_core.h"
++
++
++/* -- Device descriptor access. --------------------------------------------- */
++
++static int surface_hid_load_hid_descriptor(struct surface_hid_device *shid)
++{
++	int status;
++
++	status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
++			(u8 *)&shid->hid_desc, sizeof(shid->hid_desc));
++	if (status)
++		return status;
++
++	if (shid->hid_desc.desc_len != sizeof(shid->hid_desc)) {
++		dev_err(shid->dev, "unexpected HID descriptor length: got %u, expected %zu\n",
++			shid->hid_desc.desc_len, sizeof(shid->hid_desc));
++		return -EPROTO;
++	}
++
++	if (shid->hid_desc.desc_type != HID_DT_HID) {
++		dev_err(shid->dev, "unexpected HID descriptor type: got %#04x, expected %#04x\n",
++			shid->hid_desc.desc_type, HID_DT_HID);
++		return -EPROTO;
++	}
++
++	if (shid->hid_desc.num_descriptors != 1) {
++		dev_err(shid->dev, "unexpected number of descriptors: got %u, expected 1\n",
++			shid->hid_desc.num_descriptors);
++		return -EPROTO;
++	}
++
++	if (shid->hid_desc.report_desc_type != HID_DT_REPORT) {
++		dev_err(shid->dev, "unexpected report descriptor type: got %#04x, expected %#04x\n",
++			shid->hid_desc.report_desc_type, HID_DT_REPORT);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++static int surface_hid_load_device_attributes(struct surface_hid_device *shid)
++{
++	int status;
++
++	status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_ATTRS,
++			(u8 *)&shid->attrs, sizeof(shid->attrs));
++	if (status)
++		return status;
++
++	if (get_unaligned_le32(&shid->attrs.length) != sizeof(shid->attrs)) {
++		dev_err(shid->dev, "unexpected attribute length: got %u, expected %zu\n",
++			get_unaligned_le32(&shid->attrs.length), sizeof(shid->attrs));
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++
++/* -- Transport driver (common). -------------------------------------------- */
++
++static int surface_hid_start(struct hid_device *hid)
++{
++	struct surface_hid_device *shid = hid->driver_data;
++
++	return ssam_notifier_register(shid->ctrl, &shid->notif);
++}
++
++static void surface_hid_stop(struct hid_device *hid)
++{
++	struct surface_hid_device *shid = hid->driver_data;
++
++	/* Note: This call will log errors for us, so ignore them here. */
++	ssam_notifier_unregister(shid->ctrl, &shid->notif);
++}
++
++static int surface_hid_open(struct hid_device *hid)
++{
++	return 0;
++}
++
++static void surface_hid_close(struct hid_device *hid)
++{
++}
++
++static int surface_hid_parse(struct hid_device *hid)
++{
++	struct surface_hid_device *shid = hid->driver_data;
++	size_t len = get_unaligned_le16(&shid->hid_desc.report_desc_len);
++	u8 *buf;
++	int status;
++
++	buf = kzalloc(len, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_REPORT, buf, len);
++	if (!status)
++		status = hid_parse_report(hid, buf, len);
++
++	kfree(buf);
++	return status;
++}
++
++static int surface_hid_raw_request(struct hid_device *hid, unsigned char reportnum, u8 *buf,
++				   size_t len, unsigned char rtype, int reqtype)
++{
++	struct surface_hid_device *shid = hid->driver_data;
++
++	if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT)
++		return shid->ops.output_report(shid, reportnum, buf, len);
++
++	else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT)
++		return shid->ops.get_feature_report(shid, reportnum, buf, len);
++
++	else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT)
++		return shid->ops.set_feature_report(shid, reportnum, buf, len);
++
++	return -EIO;
++}
++
++static struct hid_ll_driver surface_hid_ll_driver = {
++	.start       = surface_hid_start,
++	.stop        = surface_hid_stop,
++	.open        = surface_hid_open,
++	.close       = surface_hid_close,
++	.parse       = surface_hid_parse,
++	.raw_request = surface_hid_raw_request,
++};
++
++
++/* -- Common device setup. -------------------------------------------------- */
++
++int surface_hid_device_add(struct surface_hid_device *shid)
++{
++	int status;
++
++	status = surface_hid_load_hid_descriptor(shid);
++	if (status)
++		return status;
++
++	status = surface_hid_load_device_attributes(shid);
++	if (status)
++		return status;
++
++	shid->hid = hid_allocate_device();
++	if (IS_ERR(shid->hid))
++		return PTR_ERR(shid->hid);
++
++	shid->hid->dev.parent = shid->dev;
++	shid->hid->bus = BUS_SURFACE_AGGREGATOR;
++	shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
++	shid->hid->product = cpu_to_le16(shid->attrs.product);
++	shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
++	shid->hid->country = shid->hid_desc.country_code;
++
++	snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
++		 shid->hid->vendor, shid->hid->product);
++
++	strscpy(shid->hid->phys, dev_name(shid->dev), sizeof(shid->hid->phys));
++
++	shid->hid->driver_data = shid;
++	shid->hid->ll_driver = &surface_hid_ll_driver;
++
++	status = hid_add_device(shid->hid);
++	if (status)
++		hid_destroy_device(shid->hid);
++
++	return status;
++}
++EXPORT_SYMBOL_GPL(surface_hid_device_add);
++
++void surface_hid_device_destroy(struct surface_hid_device *shid)
++{
++	hid_destroy_device(shid->hid);
++}
++EXPORT_SYMBOL_GPL(surface_hid_device_destroy);
++
++
++/* -- PM ops. --------------------------------------------------------------- */
++
++#ifdef CONFIG_PM_SLEEP
++
++static int surface_hid_suspend(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->suspend)
++		return d->hid->driver->suspend(d->hid, PMSG_SUSPEND);
++
++	return 0;
++}
++
++static int surface_hid_resume(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->resume)
++		return d->hid->driver->resume(d->hid);
++
++	return 0;
++}
++
++static int surface_hid_freeze(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->suspend)
++		return d->hid->driver->suspend(d->hid, PMSG_FREEZE);
++
++	return 0;
++}
++
++static int surface_hid_poweroff(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->suspend)
++		return d->hid->driver->suspend(d->hid, PMSG_HIBERNATE);
++
++	return 0;
++}
++
++static int surface_hid_restore(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->reset_resume)
++		return d->hid->driver->reset_resume(d->hid);
++
++	return 0;
++}
++
++const struct dev_pm_ops surface_hid_pm_ops = {
++	.freeze   = surface_hid_freeze,
++	.thaw     = surface_hid_resume,
++	.suspend  = surface_hid_suspend,
++	.resume   = surface_hid_resume,
++	.poweroff = surface_hid_poweroff,
++	.restore  = surface_hid_restore,
++};
++EXPORT_SYMBOL_GPL(surface_hid_pm_ops);
++
++#else /* CONFIG_PM_SLEEP */
++
++const struct dev_pm_ops surface_hid_pm_ops = { };
++EXPORT_SYMBOL_GPL(surface_hid_pm_ops);
++
++#endif /* CONFIG_PM_SLEEP */
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("HID transport driver core for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/hid/surface-hid/surface_hid_core.h b/drivers/hid/surface-hid/surface_hid_core.h
+new file mode 100644
+index 000000000000..4b1a7b57e035
+--- /dev/null
++++ b/drivers/hid/surface-hid/surface_hid_core.h
+@@ -0,0 +1,77 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Common/core components for the Surface System Aggregator Module (SSAM) HID
++ * transport driver. Provides support for integrated HID devices on Microsoft
++ * Surface models.
++ *
++ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef SURFACE_HID_CORE_H
++#define SURFACE_HID_CORE_H
++
++#include <linux/hid.h>
++#include <linux/pm.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
++
++enum surface_hid_descriptor_entry {
++	SURFACE_HID_DESC_HID    = 0,
++	SURFACE_HID_DESC_REPORT = 1,
++	SURFACE_HID_DESC_ATTRS  = 2,
++};
++
++struct surface_hid_descriptor {
++	__u8 desc_len;			/* = 9 */
++	__u8 desc_type;			/* = HID_DT_HID */
++	__le16 hid_version;
++	__u8 country_code;
++	__u8 num_descriptors;		/* = 1 */
++
++	__u8 report_desc_type;		/* = HID_DT_REPORT */
++	__le16 report_desc_len;
++} __packed;
++
++static_assert(sizeof(struct surface_hid_descriptor) == 9);
++
++struct surface_hid_attributes {
++	__le32 length;
++	__le16 vendor;
++	__le16 product;
++	__le16 version;
++	__u8 _unknown[22];
++} __packed;
++
++static_assert(sizeof(struct surface_hid_attributes) == 32);
++
++struct surface_hid_device;
++
++struct surface_hid_device_ops {
++	int (*get_descriptor)(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len);
++	int (*output_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
++	int (*get_feature_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
++	int (*set_feature_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
++};
++
++struct surface_hid_device {
++	struct device *dev;
++	struct ssam_controller *ctrl;
++	struct ssam_device_uid uid;
++
++	struct surface_hid_descriptor hid_desc;
++	struct surface_hid_attributes attrs;
++
++	struct ssam_event_notifier notif;
++	struct hid_device *hid;
++
++	struct surface_hid_device_ops ops;
++};
++
++int surface_hid_device_add(struct surface_hid_device *shid);
++void surface_hid_device_destroy(struct surface_hid_device *shid);
++
++extern const struct dev_pm_ops surface_hid_pm_ops;
++
++#endif /* SURFACE_HID_CORE_H */
+-- 
+2.30.1
+
+From b50a40d767e1cd4479c879599d4996b4191fa61f Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Thu, 11 Feb 2021 20:10:17 +0100
+Subject: [PATCH] HID: surface-hid: Add support for legacy keyboard interface
+
+Add support for the legacy keyboard (KBD/TC=0x08) HID transport layer of
+the Surface System Aggregator Module (SSAM) to the Surface HID driver.
+On Surface Laptops 1 and 2, this interface is used to connect the
+integrated keyboard.
+
+Note that this subsystem interface essentially provides a limited HID
+transport layer. In contrast to the generic HID interface (TC=0x15) used
+on newer Surface models, this interface only allows (as far as we know)
+for a single device to be connected and is otherwise severely limited in
+terms of support for feature- and output-reports. Specifically, only
+caps-lock-LED output-reports and a single read-only feature-report are
+supported.
+
+Patchset: surface-sam
+---
+ drivers/hid/surface-hid/Kconfig       |  14 ++
+ drivers/hid/surface-hid/Makefile      |   1 +
+ drivers/hid/surface-hid/surface_kbd.c | 303 ++++++++++++++++++++++++++
+ 3 files changed, 318 insertions(+)
+ create mode 100644 drivers/hid/surface-hid/surface_kbd.c
+
+diff --git a/drivers/hid/surface-hid/Kconfig b/drivers/hid/surface-hid/Kconfig
+index 642c7f0e64fe..7ce9b5d641eb 100644
+--- a/drivers/hid/surface-hid/Kconfig
++++ b/drivers/hid/surface-hid/Kconfig
+@@ -21,6 +21,20 @@ config SURFACE_HID
+ 	  integrated touchpad and keyboard, on 7th generation Microsoft Surface
+ 	  models.
+ 
++config SURFACE_KBD
++	tristate "HID keyboard transport driver for Surface System Aggregator Module"
++	select SURFACE_HID_CORE
++	help
++	  Driver to support HID keyboards on Surface Laptop 1 and 2 devices.
++
++	  This driver provides support for the HID transport protocol provided
++	  by the Surface Aggregator Module (i.e. the embedded controller) on
++	  Microsoft Surface Laptops 1 and 2. It is used to connect the
++	  integrated keyboard on those devices.
++
++	  Say M or Y here, if you want support for the integrated keyboard on
++	  Microsoft Surface Laptops 1 and 2.
++
+ endmenu
+ 
+ config SURFACE_HID_CORE
+diff --git a/drivers/hid/surface-hid/Makefile b/drivers/hid/surface-hid/Makefile
+index 62fc04632d3d..4ae11cf09b25 100644
+--- a/drivers/hid/surface-hid/Makefile
++++ b/drivers/hid/surface-hid/Makefile
+@@ -4,3 +4,4 @@
+ #
+ obj-$(CONFIG_SURFACE_HID_CORE)	+= surface_hid_core.o
+ obj-$(CONFIG_SURFACE_HID)	+= surface_hid.o
++obj-$(CONFIG_SURFACE_KBD)	+= surface_kbd.o
+diff --git a/drivers/hid/surface-hid/surface_kbd.c b/drivers/hid/surface-hid/surface_kbd.c
+new file mode 100644
+index 000000000000..e72baac952ec
+--- /dev/null
++++ b/drivers/hid/surface-hid/surface_kbd.c
+@@ -0,0 +1,303 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface System Aggregator Module (SSAM) HID transport driver for the legacy
++ * keyboard interface (KBD/TC=0x08 subsystem). Provides support for the
++ * integrated HID keyboard on Surface Laptops 1 and 2.
++ *
++ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/hid.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/controller.h>
++
++#include "surface_hid_core.h"
++
++
++/* -- SAM interface (KBD). -------------------------------------------------- */
++
++#define KBD_FEATURE_REPORT_SIZE			7  /* 6 + report ID */
++
++enum surface_kbd_cid {
++	SURFACE_KBD_CID_GET_DESCRIPTOR		= 0x00,
++	SURFACE_KBD_CID_SET_CAPSLOCK_LED	= 0x01,
++	SURFACE_KBD_CID_EVT_INPUT_GENERIC	= 0x03,
++	SURFACE_KBD_CID_EVT_INPUT_HOTKEYS	= 0x04,
++	SURFACE_KBD_CID_GET_FEATURE_REPORT	= 0x0b,
++};
++
++static int ssam_kbd_get_descriptor(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len)
++{
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++	int status;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.command_id = SURFACE_KBD_CID_GET_DESCRIPTOR;
++	rqst.instance_id = shid->uid.instance;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(entry);
++	rqst.payload = &entry;
++
++	rsp.capacity = len;
++	rsp.length = 0;
++	rsp.pointer = buf;
++
++	status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(entry));
++	if (status)
++		return status;
++
++	if (rsp.length != len) {
++		dev_err(shid->dev, "invalid descriptor length: got %zu, expected %zu\n",
++			rsp.length, len);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++static int ssam_kbd_set_caps_led(struct surface_hid_device *shid, bool value)
++{
++	struct ssam_request rqst;
++	u8 value_u8 = value;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.command_id = SURFACE_KBD_CID_SET_CAPSLOCK_LED;
++	rqst.instance_id = shid->uid.instance;
++	rqst.flags = 0;
++	rqst.length = sizeof(value_u8);
++	rqst.payload = &value_u8;
++
++	return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, NULL, sizeof(value_u8));
++}
++
++static int ssam_kbd_get_feature_report(struct surface_hid_device *shid, u8 *buf, size_t len)
++{
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++	u8 payload = 0;
++	int status;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.command_id = SURFACE_KBD_CID_GET_FEATURE_REPORT;
++	rqst.instance_id = shid->uid.instance;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(payload);
++	rqst.payload = &payload;
++
++	rsp.capacity = len;
++	rsp.length = 0;
++	rsp.pointer = buf;
++
++	status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(payload));
++	if (status)
++		return status;
++
++	if (rsp.length != len) {
++		dev_err(shid->dev, "invalid feature report length: got %zu, expected %zu\n",
++			rsp.length, len);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++static bool ssam_kbd_is_input_event(const struct ssam_event *event)
++{
++	if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_GENERIC)
++		return true;
++
++	if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_HOTKEYS)
++		return true;
++
++	return false;
++}
++
++static u32 ssam_kbd_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
++{
++	struct surface_hid_device *shid = container_of(nf, struct surface_hid_device, notif);
++	int status;
++
++	/*
++	 * Check against device UID manually, as registry and device target
++	 * category doesn't line up.
++	 */
++
++	if (shid->uid.category != event->target_category)
++		return 0;
++
++	if (shid->uid.target != event->target_id)
++		return 0;
++
++	if (shid->uid.instance != event->instance_id)
++		return 0;
++
++	if (!ssam_kbd_is_input_event(event))
++		return 0;
++
++	status = hid_input_report(shid->hid, HID_INPUT_REPORT, (u8 *)&event->data[0],
++				  event->length, 0);
++
++	return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++}
++
++
++/* -- Transport driver (KBD). ----------------------------------------------- */
++
++static int skbd_get_caps_led_value(struct hid_device *hid, u8 rprt_id, u8 *buf, size_t len)
++{
++	struct hid_field *field;
++	unsigned int offset, size;
++	int i;
++
++	/* Get LED field. */
++	field = hidinput_get_led_field(hid);
++	if (!field)
++		return -ENOENT;
++
++	/* Check if we got the correct report. */
++	if (len != hid_report_len(field->report))
++		return -ENOENT;
++
++	if (rprt_id != field->report->id)
++		return -ENOENT;
++
++	/* Get caps lock LED index. */
++	for (i = 0; i < field->report_count; i++)
++		if ((field->usage[i].hid & 0xffff) == 0x02)
++			break;
++
++	if (i == field->report_count)
++		return -ENOENT;
++
++	/* Extract value. */
++	size = field->report_size;
++	offset = field->report_offset + i * size;
++	return !!hid_field_extract(hid, buf + 1, size, offset);
++}
++
++static int skbd_output_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
++{
++	int caps_led;
++	int status;
++
++	caps_led = skbd_get_caps_led_value(shid->hid, rprt_id, buf, len);
++	if (caps_led < 0)
++		return -EIO;  /* Only caps LED output reports are supported. */
++
++	status = ssam_kbd_set_caps_led(shid, caps_led);
++	if (status < 0)
++		return status;
++
++	return len;
++}
++
++static int skbd_get_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
++{
++	u8 report[KBD_FEATURE_REPORT_SIZE];
++	int status;
++
++	/*
++	 * The keyboard only has a single hard-coded read-only feature report
++	 * of size KBD_FEATURE_REPORT_SIZE. Try to load it and compare its
++	 * report ID against the requested one.
++	 */
++
++	if (len < ARRAY_SIZE(report))
++		return -ENOSPC;
++
++	status = ssam_kbd_get_feature_report(shid, report, ARRAY_SIZE(report));
++	if (status < 0)
++		return status;
++
++	if (rprt_id != report[0])
++		return -ENOENT;
++
++	memcpy(buf, report, ARRAY_SIZE(report));
++	return len;
++}
++
++static int skbd_set_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
++{
++	/* Not supported. See skbd_get_feature_report() for details. */
++	return -EIO;
++}
++
++
++/* -- Driver setup. --------------------------------------------------------- */
++
++static int surface_kbd_probe(struct platform_device *pdev)
++{
++	struct ssam_controller *ctrl;
++	struct surface_hid_device *shid;
++
++	/* Add device link to EC. */
++	ctrl = ssam_client_bind(&pdev->dev);
++	if (IS_ERR(ctrl))
++		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
++
++	shid = devm_kzalloc(&pdev->dev, sizeof(*shid), GFP_KERNEL);
++	if (!shid)
++		return -ENOMEM;
++
++	shid->dev = &pdev->dev;
++	shid->ctrl = ctrl;
++
++	shid->uid.domain = SSAM_DOMAIN_SERIALHUB;
++	shid->uid.category = SSAM_SSH_TC_KBD;
++	shid->uid.target = 2;
++	shid->uid.instance = 0;
++	shid->uid.function = 0;
++
++	shid->notif.base.priority = 1;
++	shid->notif.base.fn = ssam_kbd_event_fn;
++	shid->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	shid->notif.event.id.target_category = shid->uid.category;
++	shid->notif.event.id.instance = shid->uid.instance;
++	shid->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	shid->notif.event.flags = 0;
++
++	shid->ops.get_descriptor = ssam_kbd_get_descriptor;
++	shid->ops.output_report = skbd_output_report;
++	shid->ops.get_feature_report = skbd_get_feature_report;
++	shid->ops.set_feature_report = skbd_set_feature_report;
++
++	platform_set_drvdata(pdev, shid);
++	return surface_hid_device_add(shid);
++}
++
++static int surface_kbd_remove(struct platform_device *pdev)
++{
++	surface_hid_device_destroy(platform_get_drvdata(pdev));
++	return 0;
++}
++
++static const struct acpi_device_id surface_kbd_match[] = {
++	{ "MSHW0096" },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, surface_kbd_match);
++
++static struct platform_driver surface_kbd_driver = {
++	.probe = surface_kbd_probe,
++	.remove = surface_kbd_remove,
++	.driver = {
++		.name = "surface_keyboard",
++		.acpi_match_table = surface_kbd_match,
++		.pm = &surface_hid_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(surface_kbd_driver);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("HID legacy transport driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+-- 
+2.30.1
+
+From 883c16b19d9588593a8fc403bf5cf990a1d4e281 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Fri, 12 Feb 2021 21:06:12 +0100
+Subject: [PATCH] power: supply: Add battery driver for Surface Aggregator
+ Module
+
+On newer Microsoft Surface models (specifically 7th-generation, i.e.
+Surface Pro 7, Surface Book 3, Surface Laptop 3, and Surface Laptop Go),
+battery and AC status/information is no longer handled via standard ACPI
+devices, but instead directly via the Surface System Aggregator Module
+(SSAM), i.e. the embedded controller on those devices.
+
+While on previous generation models, battery status is also handled via
+SSAM, an ACPI shim was present to translate the standard ACPI battery
+interface to SSAM requests. The SSAM interface itself, which is modeled
+closely after the ACPI interface, has not changed.
+
+This commit introduces a new SSAM client device driver to support
+battery status/information via the aforementioned interface on said
+Surface models. It is in parts based on the standard ACPI battery
+driver.
+
+Patchset: surface-sam
+---
+ MAINTAINERS                            |   7 +
+ drivers/power/supply/Kconfig           |  16 +
+ drivers/power/supply/Makefile          |   1 +
+ drivers/power/supply/surface_battery.c | 901 +++++++++++++++++++++++++
+ 4 files changed, 925 insertions(+)
+ create mode 100644 drivers/power/supply/surface_battery.c
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index d4ebe9f7ae69..b107e059770f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11786,6 +11786,13 @@ F:	drivers/scsi/smartpqi/smartpqi*.[ch]
+ F:	include/linux/cciss*.h
+ F:	include/uapi/linux/cciss*.h
+ 
++MICROSOFT SURFACE BATTERY AND AC DRIVERS
++M:	Maximilian Luz <luzmaximilian@gmail.com>
++L:	linux-pm@vger.kernel.org
++L:	platform-driver-x86@vger.kernel.org
++S:	Maintained
++F:	drivers/power/supply/surface_battery.c
++
+ MICROSOFT SURFACE DTX DRIVER
+ M:	Maximilian Luz <luzmaximilian@gmail.com>
+ L:	platform-driver-x86@vger.kernel.org
+diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
+index eec646c568b7..ed132dde0735 100644
+--- a/drivers/power/supply/Kconfig
++++ b/drivers/power/supply/Kconfig
+@@ -774,4 +774,20 @@ config RN5T618_POWER
+ 	  This driver can also be built as a module. If so, the module will be
+ 	  called rn5t618_power.
+ 
++config BATTERY_SURFACE
++	tristate "Battery driver for 7th-generation Microsoft Surface devices"
++	depends on SURFACE_AGGREGATOR_REGISTRY
++	help
++	  Driver for battery devices connected via/managed by the Surface System
++	  Aggregator Module (SSAM).
++
++	  This driver provides battery-information and -status support for
++	  Surface devices where said data is not exposed via the standard ACPI
++	  devices. On those models (7th-generation), battery-information is
++	  instead handled directly via SSAM client devices and this driver.
++
++	  Say M or Y here to include battery status support for 7th-generation
++	  Microsoft Surface devices, i.e. Surface Pro 7, Surface Laptop 3,
++	  Surface Book 3, and Surface Laptop Go.
++
+ endif # POWER_SUPPLY
+diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
+index dd4b86318cd9..cddc18994119 100644
+--- a/drivers/power/supply/Makefile
++++ b/drivers/power/supply/Makefile
+@@ -98,3 +98,4 @@ obj-$(CONFIG_CHARGER_BD70528)	+= bd70528-charger.o
+ obj-$(CONFIG_CHARGER_BD99954)	+= bd99954-charger.o
+ obj-$(CONFIG_CHARGER_WILCO)	+= wilco-charger.o
+ obj-$(CONFIG_RN5T618_POWER)	+= rn5t618_power.o
++obj-$(CONFIG_BATTERY_SURFACE)	+= surface_battery.o
+diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
+new file mode 100644
+index 000000000000..327fd7af386b
+--- /dev/null
++++ b/drivers/power/supply/surface_battery.c
+@@ -0,0 +1,901 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Battery driver for 7th-generation Microsoft Surface devices via Surface
++ * System Aggregator Module (SSAM).
++ *
++ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/jiffies.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/power_supply.h>
++#include <linux/sysfs.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/device.h>
++
++
++/* -- SAM interface. -------------------------------------------------------- */
++
++enum sam_event_cid_bat {
++	SAM_EVENT_CID_BAT_BIX         = 0x15,
++	SAM_EVENT_CID_BAT_BST         = 0x16,
++	SAM_EVENT_CID_BAT_ADP         = 0x17,
++	SAM_EVENT_CID_BAT_PROT        = 0x18,
++	SAM_EVENT_CID_BAT_DPTF        = 0x53,
++};
++
++enum sam_battery_sta {
++	SAM_BATTERY_STA_OK            = 0x0f,
++	SAM_BATTERY_STA_PRESENT	      = 0x10,
++};
++
++enum sam_battery_state {
++	SAM_BATTERY_STATE_DISCHARGING = BIT(0),
++	SAM_BATTERY_STATE_CHARGING    = BIT(1),
++	SAM_BATTERY_STATE_CRITICAL    = BIT(2),
++};
++
++enum sam_battery_power_unit {
++	SAM_BATTERY_POWER_UNIT_mW     = 0,
++	SAM_BATTERY_POWER_UNIT_mA     = 1,
++};
++
++/* Equivalent to data returned in ACPI _BIX method, revision 0. */
++struct spwr_bix {
++	u8  revision;
++	__le32 power_unit;
++	__le32 design_cap;
++	__le32 last_full_charge_cap;
++	__le32 technology;
++	__le32 design_voltage;
++	__le32 design_cap_warn;
++	__le32 design_cap_low;
++	__le32 cycle_count;
++	__le32 measurement_accuracy;
++	__le32 max_sampling_time;
++	__le32 min_sampling_time;
++	__le32 max_avg_interval;
++	__le32 min_avg_interval;
++	__le32 bat_cap_granularity_1;
++	__le32 bat_cap_granularity_2;
++	__u8 model[21];
++	__u8 serial[11];
++	__u8 type[5];
++	__u8 oem_info[21];
++} __packed;
++
++static_assert(sizeof(struct spwr_bix) == 119);
++
++/* Equivalent to data returned in ACPI _BST method. */
++struct spwr_bst {
++	__le32 state;
++	__le32 present_rate;
++	__le32 remaining_cap;
++	__le32 present_voltage;
++} __packed;
++
++static_assert(sizeof(struct spwr_bst) == 16);
++
++#define SPWR_BIX_REVISION		0
++#define SPWR_BATTERY_VALUE_UNKNOWN	0xffffffff
++
++/* Get battery status (_STA) */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x01,
++});
++
++/* Get battery static information (_BIX). */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_bix, struct spwr_bix, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x02,
++});
++
++/* Get battery dynamic information (_BST). */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_bst, struct spwr_bst, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x03,
++});
++
++/* Set battery trip point (_BTP). */
++static SSAM_DEFINE_SYNC_REQUEST_CL_W(ssam_bat_set_btp, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x04,
++});
++
++
++/* -- Device structures. ---------------------------------------------------- */
++
++struct spwr_psy_properties {
++	const char *name;
++	struct ssam_event_registry registry;
++};
++
++struct spwr_battery_device {
++	struct ssam_device *sdev;
++
++	char name[32];
++	struct power_supply *psy;
++	struct power_supply_desc psy_desc;
++
++	struct delayed_work update_work;
++
++	struct ssam_event_notifier notif;
++
++	struct mutex lock;  /* Guards access to state data below. */
++	unsigned long timestamp;
++
++	__le32 sta;
++	struct spwr_bix bix;
++	struct spwr_bst bst;
++	u32 alarm;
++};
++
++
++/* -- Module parameters. ---------------------------------------------------- */
++
++static unsigned int cache_time = 1000;
++module_param(cache_time, uint, 0644);
++MODULE_PARM_DESC(cache_time, "battery state caching time in milliseconds [default: 1000]");
++
++
++/* -- State management. ----------------------------------------------------- */
++
++/*
++ * Delay for battery update quirk. See spwr_battery_recheck_adapter() below
++ * for more details.
++ */
++#define SPWR_AC_BAT_UPDATE_DELAY	msecs_to_jiffies(5000)
++
++static bool spwr_battery_present(struct spwr_battery_device *bat)
++{
++	lockdep_assert_held(&bat->lock);
++
++	return le32_to_cpu(bat->sta) & SAM_BATTERY_STA_PRESENT;
++}
++
++static int spwr_battery_load_sta(struct spwr_battery_device *bat)
++{
++	lockdep_assert_held(&bat->lock);
++
++	return ssam_retry(ssam_bat_get_sta, bat->sdev, &bat->sta);
++}
++
++static int spwr_battery_load_bix(struct spwr_battery_device *bat)
++{
++	int status;
++
++	lockdep_assert_held(&bat->lock);
++
++	if (!spwr_battery_present(bat))
++		return 0;
++
++	status = ssam_retry(ssam_bat_get_bix, bat->sdev, &bat->bix);
++
++	/* Enforce NULL terminated strings in case anything goes wrong... */
++	bat->bix.model[ARRAY_SIZE(bat->bix.model) - 1] = 0;
++	bat->bix.serial[ARRAY_SIZE(bat->bix.serial) - 1] = 0;
++	bat->bix.type[ARRAY_SIZE(bat->bix.type) - 1] = 0;
++	bat->bix.oem_info[ARRAY_SIZE(bat->bix.oem_info) - 1] = 0;
++
++	return status;
++}
++
++static int spwr_battery_load_bst(struct spwr_battery_device *bat)
++{
++	lockdep_assert_held(&bat->lock);
++
++	if (!spwr_battery_present(bat))
++		return 0;
++
++	return ssam_retry(ssam_bat_get_bst, bat->sdev, &bat->bst);
++}
++
++static int spwr_battery_set_alarm_unlocked(struct spwr_battery_device *bat, u32 value)
++{
++	__le32 value_le = cpu_to_le32(value);
++
++	lockdep_assert_held(&bat->lock);
++
++	bat->alarm = value;
++	return ssam_retry(ssam_bat_set_btp, bat->sdev, &value_le);
++}
++
++static int spwr_battery_update_bst_unlocked(struct spwr_battery_device *bat, bool cached)
++{
++	unsigned long cache_deadline = bat->timestamp + msecs_to_jiffies(cache_time);
++	int status;
++
++	lockdep_assert_held(&bat->lock);
++
++	if (cached && bat->timestamp && time_is_after_jiffies(cache_deadline))
++		return 0;
++
++	status = spwr_battery_load_sta(bat);
++	if (status)
++		return status;
++
++	status = spwr_battery_load_bst(bat);
++	if (status)
++		return status;
++
++	bat->timestamp = jiffies;
++	return 0;
++}
++
++static int spwr_battery_update_bst(struct spwr_battery_device *bat, bool cached)
++{
++	int status;
++
++	mutex_lock(&bat->lock);
++	status = spwr_battery_update_bst_unlocked(bat, cached);
++	mutex_unlock(&bat->lock);
++
++	return status;
++}
++
++static int spwr_battery_update_bix_unlocked(struct spwr_battery_device *bat)
++{
++	int status;
++
++	lockdep_assert_held(&bat->lock);
++
++	status = spwr_battery_load_sta(bat);
++	if (status)
++		return status;
++
++	status = spwr_battery_load_bix(bat);
++	if (status)
++		return status;
++
++	status = spwr_battery_load_bst(bat);
++	if (status)
++		return status;
++
++	if (bat->bix.revision != SPWR_BIX_REVISION)
++		dev_warn(&bat->sdev->dev, "unsupported battery revision: %u\n", bat->bix.revision);
++
++	bat->timestamp = jiffies;
++	return 0;
++}
++
++static u32 spwr_battery_get_full_cap_safe(struct spwr_battery_device *bat)
++{
++	u32 full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
++
++	lockdep_assert_held(&bat->lock);
++
++	if (full_cap == 0 || full_cap == SPWR_BATTERY_VALUE_UNKNOWN)
++		full_cap = get_unaligned_le32(&bat->bix.design_cap);
++
++	return full_cap;
++}
++
++static bool spwr_battery_is_full(struct spwr_battery_device *bat)
++{
++	u32 state = get_unaligned_le32(&bat->bst.state);
++	u32 full_cap = spwr_battery_get_full_cap_safe(bat);
++	u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
++
++	lockdep_assert_held(&bat->lock);
++
++	return full_cap != SPWR_BATTERY_VALUE_UNKNOWN && full_cap != 0 &&
++		remaining_cap != SPWR_BATTERY_VALUE_UNKNOWN &&
++		remaining_cap >= full_cap &&
++		state == 0;
++}
++
++static int spwr_battery_recheck_full(struct spwr_battery_device *bat)
++{
++	bool present;
++	u32 unit;
++	int status;
++
++	mutex_lock(&bat->lock);
++	unit = get_unaligned_le32(&bat->bix.power_unit);
++	present = spwr_battery_present(bat);
++
++	status = spwr_battery_update_bix_unlocked(bat);
++	if (status)
++		goto out;
++
++	/* If battery has been attached, (re-)initialize alarm. */
++	if (!present && spwr_battery_present(bat)) {
++		u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
++
++		status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
++		if (status)
++			goto out;
++	}
++
++	/*
++	 * Warn if the unit has changed. This is something we genuinely don't
++	 * expect to happen, so make this a big warning. If it does, we'll
++	 * need to add support for it.
++	 */
++	WARN_ON(unit != get_unaligned_le32(&bat->bix.power_unit));
++
++out:
++	mutex_unlock(&bat->lock);
++
++	if (!status)
++		power_supply_changed(bat->psy);
++
++	return status;
++}
++
++static int spwr_battery_recheck_status(struct spwr_battery_device *bat)
++{
++	int status;
++
++	status = spwr_battery_update_bst(bat, false);
++	if (!status)
++		power_supply_changed(bat->psy);
++
++	return status;
++}
++
++static int spwr_battery_recheck_adapter(struct spwr_battery_device *bat)
++{
++	/*
++	 * Handle battery update quirk: When the battery is fully charged (or
++	 * charged up to the limit imposed by the UEFI battery limit) and the
++	 * adapter is plugged in or removed, the EC does not send a separate
++	 * event for the state (charging/discharging) change. Furthermore, it
++	 * may take some time until the state is updated on the battery.
++	 * Schedule an update to solve this.
++	 */
++
++	schedule_delayed_work(&bat->update_work, SPWR_AC_BAT_UPDATE_DELAY);
++	return 0;
++}
++
++static u32 spwr_notify_bat(struct ssam_event_notifier *nf, const struct ssam_event *event)
++{
++	struct spwr_battery_device *bat = container_of(nf, struct spwr_battery_device, notif);
++	int status;
++
++	dev_dbg(&bat->sdev->dev, "power event (cid = %#04x, iid = %#04x, tid = %#04x)\n",
++		event->command_id, event->instance_id, event->target_id);
++
++	/* Handled here: it must be handled for all targets/instances. */
++	if (event->command_id == SAM_EVENT_CID_BAT_ADP) {
++		status = spwr_battery_recheck_adapter(bat);
++		return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++	}
++
++	if (bat->sdev->uid.target != event->target_id)
++		return 0;
++
++	if (bat->sdev->uid.instance != event->instance_id)
++		return 0;
++
++	switch (event->command_id) {
++	case SAM_EVENT_CID_BAT_BIX:
++		status = spwr_battery_recheck_full(bat);
++		break;
++
++	case SAM_EVENT_CID_BAT_BST:
++		status = spwr_battery_recheck_status(bat);
++		break;
++
++	case SAM_EVENT_CID_BAT_PROT:
++		/*
++		 * TODO: Implement support for battery protection status change
++		 *       event.
++		 */
++		status = 0;
++		break;
++
++	case SAM_EVENT_CID_BAT_DPTF:
++		/*
++		 * TODO: Implement support for DPTF event.
++		 */
++		status = 0;
++		break;
++
++	default:
++		return 0;
++	}
++
++	return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++}
++
++static void spwr_battery_update_bst_workfn(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct spwr_battery_device *bat;
++	int status;
++
++	bat = container_of(dwork, struct spwr_battery_device, update_work);
++
++	status = spwr_battery_update_bst(bat, false);
++	if (status) {
++		dev_err(&bat->sdev->dev, "failed to update battery state: %d\n", status);
++		return;
++	}
++	power_supply_changed(bat->psy);
++}
++
++
++/* -- Properties. ----------------------------------------------------------- */
++
++static enum power_supply_property spwr_battery_props_chg[] = {
++	POWER_SUPPLY_PROP_STATUS,
++	POWER_SUPPLY_PROP_PRESENT,
++	POWER_SUPPLY_PROP_TECHNOLOGY,
++	POWER_SUPPLY_PROP_CYCLE_COUNT,
++	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
++	POWER_SUPPLY_PROP_VOLTAGE_NOW,
++	POWER_SUPPLY_PROP_CURRENT_NOW,
++	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
++	POWER_SUPPLY_PROP_CHARGE_FULL,
++	POWER_SUPPLY_PROP_CHARGE_NOW,
++	POWER_SUPPLY_PROP_CAPACITY,
++	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
++	POWER_SUPPLY_PROP_MODEL_NAME,
++	POWER_SUPPLY_PROP_MANUFACTURER,
++	POWER_SUPPLY_PROP_SERIAL_NUMBER,
++};
++
++static enum power_supply_property spwr_battery_props_eng[] = {
++	POWER_SUPPLY_PROP_STATUS,
++	POWER_SUPPLY_PROP_PRESENT,
++	POWER_SUPPLY_PROP_TECHNOLOGY,
++	POWER_SUPPLY_PROP_CYCLE_COUNT,
++	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
++	POWER_SUPPLY_PROP_VOLTAGE_NOW,
++	POWER_SUPPLY_PROP_POWER_NOW,
++	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
++	POWER_SUPPLY_PROP_ENERGY_FULL,
++	POWER_SUPPLY_PROP_ENERGY_NOW,
++	POWER_SUPPLY_PROP_CAPACITY,
++	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
++	POWER_SUPPLY_PROP_MODEL_NAME,
++	POWER_SUPPLY_PROP_MANUFACTURER,
++	POWER_SUPPLY_PROP_SERIAL_NUMBER,
++};
++
++static int spwr_battery_prop_status(struct spwr_battery_device *bat)
++{
++	u32 state = get_unaligned_le32(&bat->bst.state);
++	u32 present_rate = get_unaligned_le32(&bat->bst.present_rate);
++
++	lockdep_assert_held(&bat->lock);
++
++	if (state & SAM_BATTERY_STATE_DISCHARGING)
++		return POWER_SUPPLY_STATUS_DISCHARGING;
++
++	if (state & SAM_BATTERY_STATE_CHARGING)
++		return POWER_SUPPLY_STATUS_CHARGING;
++
++	if (spwr_battery_is_full(bat))
++		return POWER_SUPPLY_STATUS_FULL;
++
++	if (present_rate == 0)
++		return POWER_SUPPLY_STATUS_NOT_CHARGING;
++
++	return POWER_SUPPLY_STATUS_UNKNOWN;
++}
++
++static int spwr_battery_prop_technology(struct spwr_battery_device *bat)
++{
++	lockdep_assert_held(&bat->lock);
++
++	if (!strcasecmp("NiCd", bat->bix.type))
++		return POWER_SUPPLY_TECHNOLOGY_NiCd;
++
++	if (!strcasecmp("NiMH", bat->bix.type))
++		return POWER_SUPPLY_TECHNOLOGY_NiMH;
++
++	if (!strcasecmp("LION", bat->bix.type))
++		return POWER_SUPPLY_TECHNOLOGY_LION;
++
++	if (!strncasecmp("LI-ION", bat->bix.type, 6))
++		return POWER_SUPPLY_TECHNOLOGY_LION;
++
++	if (!strcasecmp("LiP", bat->bix.type))
++		return POWER_SUPPLY_TECHNOLOGY_LIPO;
++
++	return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
++}
++
++static int spwr_battery_prop_capacity(struct spwr_battery_device *bat)
++{
++	u32 full_cap = spwr_battery_get_full_cap_safe(bat);
++	u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
++
++	lockdep_assert_held(&bat->lock);
++
++	if (full_cap == 0 || full_cap == SPWR_BATTERY_VALUE_UNKNOWN)
++		return -ENODEV;
++
++	if (remaining_cap == SPWR_BATTERY_VALUE_UNKNOWN)
++		return -ENODEV;
++
++	return remaining_cap * 100 / full_cap;
++}
++
++static int spwr_battery_prop_capacity_level(struct spwr_battery_device *bat)
++{
++	u32 state = get_unaligned_le32(&bat->bst.state);
++	u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
++
++	lockdep_assert_held(&bat->lock);
++
++	if (state & SAM_BATTERY_STATE_CRITICAL)
++		return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
++
++	if (spwr_battery_is_full(bat))
++		return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
++
++	if (remaining_cap <= bat->alarm)
++		return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
++
++	return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
++}
++
++static int spwr_battery_get_property(struct power_supply *psy, enum power_supply_property psp,
++				     union power_supply_propval *val)
++{
++	struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
++	u32 value;
++	int status;
++
++	mutex_lock(&bat->lock);
++
++	status = spwr_battery_update_bst_unlocked(bat, true);
++	if (status)
++		goto out;
++
++	/* Abort if battery is not present. */
++	if (!spwr_battery_present(bat) && psp != POWER_SUPPLY_PROP_PRESENT) {
++		status = -ENODEV;
++		goto out;
++	}
++
++	switch (psp) {
++	case POWER_SUPPLY_PROP_STATUS:
++		val->intval = spwr_battery_prop_status(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_PRESENT:
++		val->intval = spwr_battery_present(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_TECHNOLOGY:
++		val->intval = spwr_battery_prop_technology(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_CYCLE_COUNT:
++		value = get_unaligned_le32(&bat->bix.cycle_count);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
++		value = get_unaligned_le32(&bat->bix.design_voltage);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
++		value = get_unaligned_le32(&bat->bst.present_voltage);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CURRENT_NOW:
++	case POWER_SUPPLY_PROP_POWER_NOW:
++		value = get_unaligned_le32(&bat->bst.present_rate);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
++	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
++		value = get_unaligned_le32(&bat->bix.design_cap);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CHARGE_FULL:
++	case POWER_SUPPLY_PROP_ENERGY_FULL:
++		value = get_unaligned_le32(&bat->bix.last_full_charge_cap);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CHARGE_NOW:
++	case POWER_SUPPLY_PROP_ENERGY_NOW:
++		value = get_unaligned_le32(&bat->bst.remaining_cap);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CAPACITY:
++		val->intval = spwr_battery_prop_capacity(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
++		val->intval = spwr_battery_prop_capacity_level(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_MODEL_NAME:
++		val->strval = bat->bix.model;
++		break;
++
++	case POWER_SUPPLY_PROP_MANUFACTURER:
++		val->strval = bat->bix.oem_info;
++		break;
++
++	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
++		val->strval = bat->bix.serial;
++		break;
++
++	default:
++		status = -EINVAL;
++		break;
++	}
++
++out:
++	mutex_unlock(&bat->lock);
++	return status;
++}
++
++
++/* -- Alarm attribute. ------------------------------------------------------ */
++
++static ssize_t spwr_battery_alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	struct power_supply *psy = dev_get_drvdata(dev);
++	struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
++	int status;
++
++	mutex_lock(&bat->lock);
++	status = sysfs_emit(buf, "%d\n", bat->alarm * 1000);
++	mutex_unlock(&bat->lock);
++
++	return status;
++}
++
++static ssize_t spwr_battery_alarm_store(struct device *dev, struct device_attribute *attr,
++					const char *buf, size_t count)
++{
++	struct power_supply *psy = dev_get_drvdata(dev);
++	struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
++	unsigned long value;
++	int status;
++
++	status = kstrtoul(buf, 0, &value);
++	if (status)
++		return status;
++
++	mutex_lock(&bat->lock);
++
++	if (!spwr_battery_present(bat)) {
++		mutex_unlock(&bat->lock);
++		return -ENODEV;
++	}
++
++	status = spwr_battery_set_alarm_unlocked(bat, value / 1000);
++	if (status) {
++		mutex_unlock(&bat->lock);
++		return status;
++	}
++
++	mutex_unlock(&bat->lock);
++	return count;
++}
++
++static const struct device_attribute alarm_attr = {
++	.attr = {.name = "alarm", .mode = 0644},
++	.show = spwr_battery_alarm_show,
++	.store = spwr_battery_alarm_store,
++};
++
++
++/* -- Device setup. --------------------------------------------------------- */
++
++static void spwr_battery_init(struct spwr_battery_device *bat, struct ssam_device *sdev,
++			      struct ssam_event_registry registry, const char *name)
++{
++	mutex_init(&bat->lock);
++	strncpy(bat->name, name, ARRAY_SIZE(bat->name) - 1);
++
++	bat->sdev = sdev;
++
++	bat->notif.base.priority = 1;
++	bat->notif.base.fn = spwr_notify_bat;
++	bat->notif.event.reg = registry;
++	bat->notif.event.id.target_category = sdev->uid.category;
++	bat->notif.event.id.instance = 0;
++	bat->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	bat->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	bat->psy_desc.name = bat->name;
++	bat->psy_desc.type = POWER_SUPPLY_TYPE_BATTERY;
++	bat->psy_desc.get_property = spwr_battery_get_property;
++
++	INIT_DELAYED_WORK(&bat->update_work, spwr_battery_update_bst_workfn);
++}
++
++static void spwr_battery_destroy(struct spwr_battery_device *bat)
++{
++	mutex_destroy(&bat->lock);
++}
++
++static int spwr_battery_register(struct spwr_battery_device *bat)
++{
++	struct power_supply_config psy_cfg = {};
++	__le32 sta;
++	int status;
++
++	/* Make sure the device is there and functioning properly. */
++	status = ssam_retry(ssam_bat_get_sta, bat->sdev, &sta);
++	if (status)
++		return status;
++
++	if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
++		return -ENODEV;
++
++	/* Satisfy lockdep although we are in an exclusive context here. */
++	mutex_lock(&bat->lock);
++
++	status = spwr_battery_update_bix_unlocked(bat);
++	if (status) {
++		mutex_unlock(&bat->lock);
++		return status;
++	}
++
++	if (spwr_battery_present(bat)) {
++		u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
++
++		status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
++		if (status) {
++			mutex_unlock(&bat->lock);
++			return status;
++		}
++	}
++
++	mutex_unlock(&bat->lock);
++
++	switch (get_unaligned_le32(&bat->bix.power_unit)) {
++	case SAM_BATTERY_POWER_UNIT_mW:
++		bat->psy_desc.properties = spwr_battery_props_eng;
++		bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_eng);
++		break;
++
++	case SAM_BATTERY_POWER_UNIT_mA:
++		bat->psy_desc.properties = spwr_battery_props_chg;
++		bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_chg);
++		break;
++
++	default:
++		dev_err(&bat->sdev->dev, "unsupported battery power unit: %u\n",
++			get_unaligned_le32(&bat->bix.power_unit));
++		return -EINVAL;
++	}
++
++	psy_cfg.drv_data = bat;
++	bat->psy = power_supply_register(&bat->sdev->dev, &bat->psy_desc, &psy_cfg);
++	if (IS_ERR(bat->psy))
++		return PTR_ERR(bat->psy);
++
++	status = ssam_notifier_register(bat->sdev->ctrl, &bat->notif);
++	if (status)
++		goto err_notif;
++
++	status = device_create_file(&bat->psy->dev, &alarm_attr);
++	if (status)
++		goto err_file;
++
++	return 0;
++
++err_file:
++	ssam_notifier_unregister(bat->sdev->ctrl, &bat->notif);
++err_notif:
++	power_supply_unregister(bat->psy);
++	return status;
++}
++
++static void spwr_battery_unregister(struct spwr_battery_device *bat)
++{
++	ssam_notifier_unregister(bat->sdev->ctrl, &bat->notif);
++	cancel_delayed_work_sync(&bat->update_work);
++	device_remove_file(&bat->psy->dev, &alarm_attr);
++	power_supply_unregister(bat->psy);
++}
++
++
++/* -- Driver setup. --------------------------------------------------------- */
++
++static int __maybe_unused surface_battery_resume(struct device *dev)
++{
++	return spwr_battery_recheck_full(dev_get_drvdata(dev));
++}
++static SIMPLE_DEV_PM_OPS(surface_battery_pm_ops, NULL, surface_battery_resume);
++
++static int surface_battery_probe(struct ssam_device *sdev)
++{
++	const struct spwr_psy_properties *p;
++	struct spwr_battery_device *bat;
++	int status;
++
++	p = ssam_device_get_match_data(sdev);
++	if (!p)
++		return -ENODEV;
++
++	bat = devm_kzalloc(&sdev->dev, sizeof(*bat), GFP_KERNEL);
++	if (!bat)
++		return -ENOMEM;
++
++	spwr_battery_init(bat, sdev, p->registry, p->name);
++	ssam_device_set_drvdata(sdev, bat);
++
++	status = spwr_battery_register(bat);
++	if (status)
++		spwr_battery_destroy(bat);
++
++	return status;
++}
++
++static void surface_battery_remove(struct ssam_device *sdev)
++{
++	struct spwr_battery_device *bat = ssam_device_get_drvdata(sdev);
++
++	spwr_battery_unregister(bat);
++	spwr_battery_destroy(bat);
++}
++
++static const struct spwr_psy_properties spwr_psy_props_bat1 = {
++	.name = "BAT1",
++	.registry = SSAM_EVENT_REGISTRY_SAM,
++};
++
++static const struct spwr_psy_properties spwr_psy_props_bat2_sb3 = {
++	.name = "BAT2",
++	.registry = SSAM_EVENT_REGISTRY_KIP,
++};
++
++static const struct ssam_device_id surface_battery_match[] = {
++	{ SSAM_SDEV(BAT, 0x01, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat1     },
++	{ SSAM_SDEV(BAT, 0x02, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat2_sb3 },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, surface_battery_match);
++
++static struct ssam_device_driver surface_battery_driver = {
++	.probe = surface_battery_probe,
++	.remove = surface_battery_remove,
++	.match_table = surface_battery_match,
++	.driver = {
++		.name = "surface_battery",
++		.pm = &surface_battery_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_ssam_device_driver(surface_battery_driver);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Battery driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+-- 
+2.30.1
+
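Once this driver is bound, the battery shows up as a regular power-supply class device named BAT1 (the base battery of a Surface Book 3 registers as BAT2), so its standard properties live under /sys/class/power_supply/, and the additional alarm attribute created via device_create_file() appears in the same directory, scaled to µWh or µAh by the show/store handlers above. A minimal user-space sketch, assuming the BAT1 path:

/*
 * Minimal sketch (not part of the patch): read capacity and the low-battery
 * alarm of the SSAM battery via sysfs. BAT1 is the name used by the driver;
 * the base battery of a Surface Book 3 registers as BAT2.
 */
#include <stdio.h>

static int read_long(const char *path, long *val)
{
	FILE *f = fopen(path, "r");
	int ret;

	if (!f)
		return -1;

	ret = (fscanf(f, "%ld", val) == 1) ? 0 : -1;
	fclose(f);
	return ret;
}

int main(void)
{
	long capacity, alarm;

	if (read_long("/sys/class/power_supply/BAT1/capacity", &capacity) ||
	    read_long("/sys/class/power_supply/BAT1/alarm", &alarm))
		return 1;

	/* The driver reports the alarm trip point scaled to uWh or uAh. */
	printf("capacity: %ld%%\nalarm: %ld\n", capacity, alarm);
	return 0;
}

Writing to the alarm attribute is forwarded to the EC as a _BTP trip point by spwr_battery_set_alarm_unlocked().
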
+From 71bde8d631028d94e2132ba05302af1baf447414 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Fri, 12 Feb 2021 21:07:17 +0100
+Subject: [PATCH] power: supply: Add AC driver for Surface Aggregator Module
+
+On newer Microsoft Surface models (specifically 7th-generation, i.e.
+Surface Pro 7, Surface Book 3, Surface Laptop 3, and Surface Laptop Go),
+battery and AC status/information is no longer handled via standard ACPI
+devices, but instead directly via the Surface System Aggregator Module
+(SSAM), i.e. the embedded controller on those devices.
+
+While AC status is also handled via SSAM on previous-generation models,
+an ACPI shim was present to translate the standard ACPI AC interface to
+SSAM requests. The SSAM interface itself, which is modeled closely after
+the ACPI interface, has not changed.
+
+This commit introduces a new SSAM client device driver to support AC
+status/information via the aforementioned interface on said Surface
+models.
+
+Patchset: surface-sam
+---
+ MAINTAINERS                            |   1 +
+ drivers/power/supply/Kconfig           |  16 ++
+ drivers/power/supply/Makefile          |   1 +
+ drivers/power/supply/surface_charger.c | 296 +++++++++++++++++++++++++
+ 4 files changed, 314 insertions(+)
+ create mode 100644 drivers/power/supply/surface_charger.c
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index b107e059770f..2144ec466377 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11792,6 +11792,7 @@ L:	linux-pm@vger.kernel.org
+ L:	platform-driver-x86@vger.kernel.org
+ S:	Maintained
+ F:	drivers/power/supply/surface_battery.c
++F:	drivers/power/supply/surface_charger.c
+ 
+ MICROSOFT SURFACE DTX DRIVER
+ M:	Maximilian Luz <luzmaximilian@gmail.com>
+diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
+index ed132dde0735..d4105228a2a5 100644
+--- a/drivers/power/supply/Kconfig
++++ b/drivers/power/supply/Kconfig
+@@ -790,4 +790,20 @@ config BATTERY_SURFACE
+ 	  Microsoft Surface devices, i.e. Surface Pro 7, Surface Laptop 3,
+ 	  Surface Book 3, and Surface Laptop Go.
+ 
++config CHARGER_SURFACE
++	tristate "AC driver for 7th-generation Microsoft Surface devices"
++	depends on SURFACE_AGGREGATOR_REGISTRY
++	help
++	  Driver for AC devices connected via/managed by the Surface System
++	  Aggregator Module (SSAM).
++
++	  This driver provides AC-information and -status support for Surface
++	  devices where said data is not exposed via the standard ACPI devices.
++	  On those models (7th-generation), AC-information is instead handled
++	  directly via an SSAM client device and this driver.
++
++	  Say M or Y here to include AC status support for 7th-generation
++	  Microsoft Surface devices, i.e. Surface Pro 7, Surface Laptop 3,
++	  Surface Book 3, and Surface Laptop Go.
++
+ endif # POWER_SUPPLY
+diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
+index cddc18994119..9fdd34956153 100644
+--- a/drivers/power/supply/Makefile
++++ b/drivers/power/supply/Makefile
+@@ -99,3 +99,4 @@ obj-$(CONFIG_CHARGER_BD99954)	+= bd99954-charger.o
+ obj-$(CONFIG_CHARGER_WILCO)	+= wilco-charger.o
+ obj-$(CONFIG_RN5T618_POWER)	+= rn5t618_power.o
+ obj-$(CONFIG_BATTERY_SURFACE)	+= surface_battery.o
++obj-$(CONFIG_CHARGER_SURFACE)	+= surface_charger.o
+diff --git a/drivers/power/supply/surface_charger.c b/drivers/power/supply/surface_charger.c
+new file mode 100644
+index 000000000000..982f9b9ef6f5
+--- /dev/null
++++ b/drivers/power/supply/surface_charger.c
+@@ -0,0 +1,296 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * AC driver for 7th-generation Microsoft Surface devices via Surface System
++ * Aggregator Module (SSAM).
++ *
++ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/power_supply.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/device.h>
++
++
++/* -- SAM interface. -------------------------------------------------------- */
++
++enum sam_event_cid_bat {
++	SAM_EVENT_CID_BAT_ADP   = 0x17,
++};
++
++enum sam_battery_sta {
++	SAM_BATTERY_STA_OK      = 0x0f,
++	SAM_BATTERY_STA_PRESENT	= 0x10,
++};
++
++/* Get battery status (_STA). */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x01,
++});
++
++/* Get platform power source for battery (_PSR / DPTF PSRC). */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_psrc, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x0d,
++});
++
++
++/* -- Device structures. ---------------------------------------------------- */
++
++struct spwr_psy_properties {
++	const char *name;
++	struct ssam_event_registry registry;
++};
++
++struct spwr_ac_device {
++	struct ssam_device *sdev;
++
++	char name[32];
++	struct power_supply *psy;
++	struct power_supply_desc psy_desc;
++
++	struct ssam_event_notifier notif;
++
++	struct mutex lock;  /* Guards access to state below. */
++
++	__le32 state;
++};
++
++
++/* -- State management. ----------------------------------------------------- */
++
++static int spwr_ac_update_unlocked(struct spwr_ac_device *ac)
++{
++	__le32 old = ac->state;
++	int status;
++
++	lockdep_assert_held(&ac->lock);
++
++	status = ssam_retry(ssam_bat_get_psrc, ac->sdev, &ac->state);
++	if (status < 0)
++		return status;
++
++	return old != ac->state;
++}
++
++static int spwr_ac_update(struct spwr_ac_device *ac)
++{
++	int status;
++
++	mutex_lock(&ac->lock);
++	status = spwr_ac_update_unlocked(ac);
++	mutex_unlock(&ac->lock);
++
++	return status;
++}
++
++static int spwr_ac_recheck(struct spwr_ac_device *ac)
++{
++	int status;
++
++	status = spwr_ac_update(ac);
++	if (status > 0)
++		power_supply_changed(ac->psy);
++
++	return status >= 0 ? 0 : status;
++}
++
++static u32 spwr_notify_ac(struct ssam_event_notifier *nf, const struct ssam_event *event)
++{
++	struct spwr_ac_device *ac;
++	int status;
++
++	ac = container_of(nf, struct spwr_ac_device, notif);
++
++	dev_dbg(&ac->sdev->dev, "power event (cid = %#04x, iid = %#04x, tid = %#04x)\n",
++		event->command_id, event->instance_id, event->target_id);
++
++	/*
++	 * Allow events of all targets/instances here. Global adapter status
++	 * seems to be handled via target=1 and instance=1, but events are
++	 * reported on all targets/instances in use.
++	 *
++	 * While it should be enough to just listen on 1/1, listen everywhere to
++	 * make sure we don't miss anything.
++	 */
++
++	switch (event->command_id) {
++	case SAM_EVENT_CID_BAT_ADP:
++		status = spwr_ac_recheck(ac);
++		return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++
++	default:
++		return 0;
++	}
++}
++
++
++/* -- Properties. ----------------------------------------------------------- */
++
++static enum power_supply_property spwr_ac_props[] = {
++	POWER_SUPPLY_PROP_ONLINE,
++};
++
++static int spwr_ac_get_property(struct power_supply *psy, enum power_supply_property psp,
++				union power_supply_propval *val)
++{
++	struct spwr_ac_device *ac = power_supply_get_drvdata(psy);
++	int status;
++
++	mutex_lock(&ac->lock);
++
++	status = spwr_ac_update_unlocked(ac);
++	if (status)
++		goto out;
++
++	switch (psp) {
++	case POWER_SUPPLY_PROP_ONLINE:
++		val->intval = !!le32_to_cpu(ac->state);
++		break;
++
++	default:
++		status = -EINVAL;
++		goto out;
++	}
++
++out:
++	mutex_unlock(&ac->lock);
++	return status;
++}
++
++
++/* -- Device setup. --------------------------------------------------------- */
++
++static void spwr_ac_init(struct spwr_ac_device *ac, struct ssam_device *sdev,
++			 struct ssam_event_registry registry, const char *name)
++{
++	mutex_init(&ac->lock);
++	strncpy(ac->name, name, ARRAY_SIZE(ac->name) - 1);
++
++	ac->sdev = sdev;
++
++	ac->notif.base.priority = 1;
++	ac->notif.base.fn = spwr_notify_ac;
++	ac->notif.event.reg = registry;
++	ac->notif.event.id.target_category = sdev->uid.category;
++	ac->notif.event.id.instance = 0;
++	ac->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	ac->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	ac->psy_desc.name = ac->name;
++	ac->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
++	ac->psy_desc.properties = spwr_ac_props;
++	ac->psy_desc.num_properties = ARRAY_SIZE(spwr_ac_props);
++	ac->psy_desc.get_property = spwr_ac_get_property;
++}
++
++static void spwr_ac_destroy(struct spwr_ac_device *ac)
++{
++	mutex_destroy(&ac->lock);
++}
++
++static int spwr_ac_register(struct spwr_ac_device *ac)
++{
++	struct power_supply_config psy_cfg = {};
++	__le32 sta;
++	int status;
++
++	/* Make sure the device is there and functioning properly. */
++	status = ssam_retry(ssam_bat_get_sta, ac->sdev, &sta);
++	if (status)
++		return status;
++
++	if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
++		return -ENODEV;
++
++	psy_cfg.drv_data = ac;
++	ac->psy = power_supply_register(&ac->sdev->dev, &ac->psy_desc, &psy_cfg);
++	if (IS_ERR(ac->psy))
++		return PTR_ERR(ac->psy);
++
++	status = ssam_notifier_register(ac->sdev->ctrl, &ac->notif);
++	if (status)
++		power_supply_unregister(ac->psy);
++
++	return status;
++}
++
++static int spwr_ac_unregister(struct spwr_ac_device *ac)
++{
++	ssam_notifier_unregister(ac->sdev->ctrl, &ac->notif);
++	power_supply_unregister(ac->psy);
++	return 0;
++}
++
++
++/* -- Driver setup. --------------------------------------------------------- */
++
++static int __maybe_unused surface_ac_resume(struct device *dev)
++{
++	return spwr_ac_recheck(dev_get_drvdata(dev));
++}
++static SIMPLE_DEV_PM_OPS(surface_ac_pm_ops, NULL, surface_ac_resume);
++
++static int surface_ac_probe(struct ssam_device *sdev)
++{
++	const struct spwr_psy_properties *p;
++	struct spwr_ac_device *ac;
++	int status;
++
++	p = ssam_device_get_match_data(sdev);
++	if (!p)
++		return -ENODEV;
++
++	ac = devm_kzalloc(&sdev->dev, sizeof(*ac), GFP_KERNEL);
++	if (!ac)
++		return -ENOMEM;
++
++	spwr_ac_init(ac, sdev, p->registry, p->name);
++	ssam_device_set_drvdata(sdev, ac);
++
++	status = spwr_ac_register(ac);
++	if (status)
++		spwr_ac_destroy(ac);
++
++	return status;
++}
++
++static void surface_ac_remove(struct ssam_device *sdev)
++{
++	struct spwr_ac_device *ac = ssam_device_get_drvdata(sdev);
++
++	spwr_ac_unregister(ac);
++	spwr_ac_destroy(ac);
++}
++
++static const struct spwr_psy_properties spwr_psy_props_adp1 = {
++	.name = "ADP1",
++	.registry = SSAM_EVENT_REGISTRY_SAM,
++};
++
++static const struct ssam_device_id surface_ac_match[] = {
++	{ SSAM_SDEV(BAT, 0x01, 0x01, 0x01), (unsigned long)&spwr_psy_props_adp1 },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, surface_ac_match);
++
++static struct ssam_device_driver surface_ac_driver = {
++	.probe = surface_ac_probe,
++	.remove = surface_ac_remove,
++	.match_table = surface_ac_match,
++	.driver = {
++		.name = "surface_ac",
++		.pm = &surface_ac_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_ssam_device_driver(surface_ac_driver);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("AC driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+-- 
+2.30.1
+
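The charger driver registers a single power-supply device named ADP1 that only implements the online property; spwr_ac_get_property() maps any non-zero PSRC value reported by the EC to 1. A minimal sketch for querying it, again assuming the standard power-supply sysfs layout:

/* Minimal sketch (not part of the patch): query the SSAM AC adapter state. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/power_supply/ADP1/online", "r");
	int online;

	if (!f)
		return 1;

	if (fscanf(f, "%d", &online) != 1)
		online = -1;
	fclose(f);

	if (online < 0)
		return 1;

	printf("AC adapter: %s\n", online ? "online" : "offline");
	return 0;
}

Since adapter changes are propagated with power_supply_changed(), user space can also react to the resulting udev change events instead of polling.
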
+From f983d2631fe3b634dd06198b2da985794a4b1259 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sat, 13 Feb 2021 19:58:50 +0100
+Subject: [PATCH] platform/surface: Add performance mode driver
+
+Add driver to support performance mode on Surface devices with Surface
+Aggregator Module.
+
+Intended to be replaced by a platform profile driver in 5.12.
+
+Patchset: surface-sam
+---
+ drivers/platform/surface/Kconfig            |  17 +++
+ drivers/platform/surface/Makefile           |   1 +
+ drivers/platform/surface/surface_perfmode.c | 122 ++++++++++++++++++++
+ 3 files changed, 140 insertions(+)
+ create mode 100644 drivers/platform/surface/surface_perfmode.c
+
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index dea313989b4c..3ceeb316d56e 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -140,6 +140,23 @@ config SURFACE_GPE
+ 	  accordingly. It is required on those devices to allow wake-ups from
+ 	  suspend by opening the lid.
+ 
++config SURFACE_PERFMODE
++	tristate "Surface Performance-Mode Driver"
++	depends on SURFACE_AGGREGATOR_BUS
++	depends on SYSFS
++	help
++	  Driver for the performance-/cooling-mode interface of Microsoft
++	  Surface devices.
++
++	  Microsoft Surface devices using the Surface System Aggregator Module
++	  (SSAM) can be switched between different performance modes. This,
++	  depending on the device, can influence their cooling behavior and may
++	  influence power limits, allowing users to choose between performance
++	  and higher power-draw, or lower power-draw and more silent operation.
++
++	  This driver provides a user-space interface (via sysfs) for
++	  controlling said mode via the corresponding client device.
++
+ config SURFACE_PRO3_BUTTON
+ 	tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
+ 	depends on INPUT
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index 19b661e274c3..31098983decc 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -14,4 +14,5 @@ obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+ obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
+ obj-$(CONFIG_SURFACE_DTX)		+= surface_dtx.o
+ obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
++obj-$(CONFIG_SURFACE_PERFMODE)		+= surface_perfmode.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
+diff --git a/drivers/platform/surface/surface_perfmode.c b/drivers/platform/surface/surface_perfmode.c
+new file mode 100644
+index 000000000000..3b92a43f8606
+--- /dev/null
++++ b/drivers/platform/surface/surface_perfmode.c
+@@ -0,0 +1,122 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface performance-mode driver.
++ *
++ * Provides a user-space interface for the performance mode control provided
++ * by the Surface System Aggregator Module (SSAM), influencing cooling
++ * behavior of the device and potentially managing power limits.
++ *
++ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sysfs.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/device.h>
++
++enum sam_perf_mode {
++	SAM_PERF_MODE_NORMAL  = 1,
++	SAM_PERF_MODE_BATTERY = 2,
++	SAM_PERF_MODE_PERF1   = 3,
++	SAM_PERF_MODE_PERF2   = 4,
++
++	__SAM_PERF_MODE__MIN  = 1,
++	__SAM_PERF_MODE__MAX  = 4,
++};
++
++struct ssam_perf_info {
++	__le32 mode;
++	__le16 unknown1;
++	__le16 unknown2;
++} __packed;
++
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_tmp_perf_mode_get, struct ssam_perf_info, {
++	.target_category = SSAM_SSH_TC_TMP,
++	.command_id      = 0x02,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_perf_mode_set, __le32, {
++	.target_category = SSAM_SSH_TC_TMP,
++	.command_id      = 0x03,
++});
++
++static int ssam_tmp_perf_mode_set(struct ssam_device *sdev, u32 mode)
++{
++	__le32 mode_le = cpu_to_le32(mode);
++
++	if (mode < __SAM_PERF_MODE__MIN || mode > __SAM_PERF_MODE__MAX)
++		return -EINVAL;
++
++	return ssam_retry(__ssam_tmp_perf_mode_set, sdev, &mode_le);
++}
++
++static ssize_t perf_mode_show(struct device *dev, struct device_attribute *attr,
++			      char *data)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++	struct ssam_perf_info info;
++	int status;
++
++	status = ssam_retry(ssam_tmp_perf_mode_get, sdev, &info);
++	if (status) {
++		dev_err(dev, "failed to get current performance mode: %d\n",
++			status);
++		return -EIO;
++	}
++
++	return sprintf(data, "%d\n", le32_to_cpu(info.mode));
++}
++
++static ssize_t perf_mode_store(struct device *dev, struct device_attribute *attr,
++			       const char *data, size_t count)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++	int perf_mode;
++	int status;
++
++	status = kstrtoint(data, 0, &perf_mode);
++	if (status < 0)
++		return status;
++
++	status = ssam_tmp_perf_mode_set(sdev, perf_mode);
++	if (status < 0)
++		return status;
++
++	return count;
++}
++
++static const DEVICE_ATTR_RW(perf_mode);
++
++static int surface_sam_sid_perfmode_probe(struct ssam_device *sdev)
++{
++	return sysfs_create_file(&sdev->dev.kobj, &dev_attr_perf_mode.attr);
++}
++
++static void surface_sam_sid_perfmode_remove(struct ssam_device *sdev)
++{
++	sysfs_remove_file(&sdev->dev.kobj, &dev_attr_perf_mode.attr);
++}
++
++static const struct ssam_device_id ssam_perfmode_match[] = {
++	{ SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, ssam_perfmode_match);
++
++static struct ssam_device_driver surface_sam_sid_perfmode = {
++	.probe = surface_sam_sid_perfmode_probe,
++	.remove = surface_sam_sid_perfmode_remove,
++	.match_table = ssam_perfmode_match,
++	.driver = {
++		.name = "surface_performance_mode",
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_ssam_device_driver(surface_sam_sid_perfmode);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Performance mode interface for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+-- 
+2.30.1
+
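Unlike the battery and charger drivers, the performance-mode driver puts its perf_mode attribute directly on the SSAM client device, i.e. under /sys/bus/surface_aggregator/devices/ rather than in a device class. A minimal sketch for switching modes; the device name used below is an assumption derived from the SSAM_SDEV(TMP, 0x01, 0x00, 0x01) match entry and should be verified on the running system:

/*
 * Minimal sketch (not part of the patch): set the performance mode. The
 * device name is assumed (domain:category:target:instance:function for the
 * SSAM_SDEV(TMP, 0x01, 0x00, 0x01) match above); verify it by listing
 * /sys/bus/surface_aggregator/devices/ on the target system.
 */
#include <stdio.h>

#define PERF_MODE_ATTR \
	"/sys/bus/surface_aggregator/devices/01:03:01:00:01/perf_mode"

int main(void)
{
	/* 1 = normal, 2 = battery, 3 = perf1, 4 = perf2 (enum sam_perf_mode). */
	FILE *f = fopen(PERF_MODE_ATTR, "w");

	if (!f)
		return 1;

	fprintf(f, "1\n");
	return fclose(f) ? 1 : 0;
}

Values outside 1-4 are rejected with -EINVAL by ssam_tmp_perf_mode_set().
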

+ 446 - 0
patches/5.11/0006-surface-hotplug.patch

@@ -0,0 +1,446 @@
+From 789f7b475f230ae8eca4a1792cd829f0aefdc190 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Thu, 4 Feb 2021 23:06:40 +0100
+Subject: [PATCH] PCI: Run platform power transition on initial D0 entry
+
+On some devices and platforms, the initial platform power state is not
+in sync with the power state of the PCI device.
+
+pci_enable_device_flags() updates the state of a PCI device by reading
+from the PCI_PM_CTRL register. This may change the stored power state of
+the device without running the appropriate platform power transition.
+
+Due to the stored power-state being changed, the later call to
+pci_set_power_state(..., PCI_D0) in do_pci_enable_device() can evaluate
+to a no-op if the stored state has been changed to D0 via that. This
+will then prevent the appropriate platform power transition to be run,
+which can on some devices and platforms lead to platform and PCI power
+state being entirely different, i.e. out-of-sync. On ACPI platforms,
+this can lead to power resources not being turned on, even though they
+are marked as required for D0.
+
+Specifically, on the Microsoft Surface Book 2 and 3, some ACPI power
+regions that should be "on" for the D0 state (and others) are
+initialized as "off" in ACPI, whereas the PCI device is in D0. As the
+state is updated in pci_enable_device_flags() without ensuring that the
+platform state is also updated, the power resource will never be
+properly turned on. Instead, it lives in a sort of on-but-marked-as-off
+zombie-state, which confuses things down the line when attempting to
+transition the device into D3cold: As the resource is already marked as
+off, it won't be turned off and the device does not fully enter D3cold,
+causing increased power consumption during (runtime-)suspend.
+
+By replacing pci_set_power_state() in do_pci_enable_device() with
+pci_power_up(), we can force pci_platform_power_transition() to be
+called, which will then check if the platform power state needs updating
+and appropriate actions need to be taken.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+
+Patchset: surface-hotplug
+---
+ drivers/pci/pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 790393d1e318..c9fcd5514409 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1800,7 +1800,7 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ 	u16 cmd;
+ 	u8 pin;
+ 
+-	err = pci_set_power_state(dev, PCI_D0);
++	err = pci_power_up(dev);
+ 	if (err < 0 && err != -EIO)
+ 		return err;
+ 
+-- 
+2.30.1
+
+From 88347b2a5d4d24c78124b1c9fdda42088192199d Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Fri, 5 Feb 2021 02:26:57 +0100
+Subject: [PATCH] platform/surface: Add Surface Hot-Plug driver
+
+Some Surface Book 2 and 3 models have a discrete GPU (dGPU) that is
+hot-pluggable. On those devices, the dGPU is contained in the base,
+which can be separated from the tablet part (containing CPU and
+touchscreen) while the device is running.
+
+It (in general) is presented as/behaves like a standard PCIe hot-plug
+capable device, however, this device can also be put into D3cold. In
+D3cold, the device itself is turned off and can thus not submit any
+standard PCIe hot-plug events. To properly detect hot-(un)plugging while
+the dGPU is in D3cold, out-of-band signaling is required. Without this,
+the device state will only get updated during the next bus-check, e.g.
+via a manually issued lspci call.
+
+This commit adds a driver to handle out-of-band PCIe hot-(un)plug events
+on Microsoft Surface devices. On those devices, said events can be
+detected via GPIO interrupts, which are then forwarded to the
+corresponding ACPI DSM calls by this driver. The DSM then takes care of
+issuing the appropriate bus-/device-check, causing the PCI core to
+properly pick up the device change.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20210205012657.1951753-1-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Patchset: surface-hotplug
+---
+ MAINTAINERS                                |   6 +
+ drivers/platform/surface/Kconfig           |  19 ++
+ drivers/platform/surface/Makefile          |   1 +
+ drivers/platform/surface/surface_hotplug.c | 282 +++++++++++++++++++++
+ 4 files changed, 308 insertions(+)
+ create mode 100644 drivers/platform/surface/surface_hotplug.c
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 2144ec466377..a4a0519ce88c 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11824,6 +11824,12 @@ L:	platform-driver-x86@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hid/surface-hid/
+ 
++MICROSOFT SURFACE HOT-PLUG DRIVER
++M:	Maximilian Luz <luzmaximilian@gmail.com>
++L:	platform-driver-x86@vger.kernel.org
++S:	Maintained
++F:	drivers/platform/surface/surface_hotplug.c
++
+ MICROSOFT SURFACE PRO 3 BUTTON DRIVER
+ M:	Chen Yu <yu.c.chen@intel.com>
+ L:	platform-driver-x86@vger.kernel.org
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index 3ceeb316d56e..2784a480f310 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -140,6 +140,25 @@ config SURFACE_GPE
+ 	  accordingly. It is required on those devices to allow wake-ups from
+ 	  suspend by opening the lid.
+ 
++config SURFACE_HOTPLUG
++	tristate "Surface Hot-Plug Driver"
++	depends on GPIOLIB
++	help
++	  Driver for out-of-band hot-plug event signaling on Microsoft Surface
++	  devices with hot-pluggable PCIe cards.
++
++	  This driver is used on Surface Book (2 and 3) devices with a
++	  hot-pluggable discrete GPU (dGPU). When not in use, the dGPU on those
++	  devices can enter D3cold, which prevents in-band (standard) PCIe
++	  hot-plug signaling. Thus, without this driver, detaching the base
++	  containing the dGPU will not correctly update the state of the
++	  corresponding PCIe device if it is in D3cold. This driver adds support
++	  for out-of-band hot-plug notifications, ensuring that the device state
++	  is properly updated even when the device in question is in D3cold.
++
++	  Select M or Y here, if you want to (fully) support hot-plugging of
++	  dGPU devices on the Surface Book 2 and/or 3 during D3cold.
++
+ config SURFACE_PERFMODE
+ 	tristate "Surface Performance-Mode Driver"
+ 	depends on SURFACE_AGGREGATOR_BUS
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index 31098983decc..a103fdb3ef1a 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -14,5 +14,6 @@ obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+ obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
+ obj-$(CONFIG_SURFACE_DTX)		+= surface_dtx.o
+ obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
++obj-$(CONFIG_SURFACE_HOTPLUG)		+= surface_hotplug.o
+ obj-$(CONFIG_SURFACE_PERFMODE)		+= surface_perfmode.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
+diff --git a/drivers/platform/surface/surface_hotplug.c b/drivers/platform/surface/surface_hotplug.c
+new file mode 100644
+index 000000000000..cfcc15cfbacb
+--- /dev/null
++++ b/drivers/platform/surface/surface_hotplug.c
+@@ -0,0 +1,282 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface Book (2 and later) hot-plug driver.
++ *
++ * Surface Book devices (can) have a hot-pluggable discrete GPU (dGPU). This
++ * driver is responsible for out-of-band hot-plug event signaling on these
++ * devices. It is specifically required when the hot-plug device is in D3cold
++ * and can thus not generate PCIe hot-plug events itself.
++ *
++ * Event signaling is handled via ACPI, which will generate the appropriate
++ * device-check notifications to be picked up by the PCIe hot-plug driver.
++ *
++ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/gpio.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++
++static const struct acpi_gpio_params shps_base_presence_int   = { 0, 0, false };
++static const struct acpi_gpio_params shps_base_presence       = { 1, 0, false };
++static const struct acpi_gpio_params shps_device_power_int    = { 2, 0, false };
++static const struct acpi_gpio_params shps_device_power        = { 3, 0, false };
++static const struct acpi_gpio_params shps_device_presence_int = { 4, 0, false };
++static const struct acpi_gpio_params shps_device_presence     = { 5, 0, false };
++
++static const struct acpi_gpio_mapping shps_acpi_gpios[] = {
++	{ "base_presence-int-gpio",   &shps_base_presence_int,   1 },
++	{ "base_presence-gpio",       &shps_base_presence,       1 },
++	{ "device_power-int-gpio",    &shps_device_power_int,    1 },
++	{ "device_power-gpio",        &shps_device_power,        1 },
++	{ "device_presence-int-gpio", &shps_device_presence_int, 1 },
++	{ "device_presence-gpio",     &shps_device_presence,     1 },
++	{ },
++};
++
++/* 5515a847-ed55-4b27-8352-cd320e10360a */
++static const guid_t shps_dsm_guid =
++	GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd, 0x32, 0x0e, 0x10, 0x36, 0x0a);
++
++#define SHPS_DSM_REVISION		1
++
++enum shps_dsm_fn {
++	SHPS_DSM_FN_PCI_NUM_ENTRIES	= 0x01,
++	SHPS_DSM_FN_PCI_GET_ENTRIES	= 0x02,
++	SHPS_DSM_FN_IRQ_BASE_PRESENCE	= 0x03,
++	SHPS_DSM_FN_IRQ_DEVICE_POWER	= 0x04,
++	SHPS_DSM_FN_IRQ_DEVICE_PRESENCE	= 0x05,
++};
++
++enum shps_irq_type {
++	/* NOTE: Must be in order of enum shps_dsm_fn above. */
++	SHPS_IRQ_TYPE_BASE_PRESENCE	= 0,
++	SHPS_IRQ_TYPE_DEVICE_POWER	= 1,
++	SHPS_IRQ_TYPE_DEVICE_PRESENCE	= 2,
++	SHPS_NUM_IRQS,
++};
++
++static const char *const shps_gpio_names[] = {
++	[SHPS_IRQ_TYPE_BASE_PRESENCE]	= "base_presence",
++	[SHPS_IRQ_TYPE_DEVICE_POWER]	= "device_power",
++	[SHPS_IRQ_TYPE_DEVICE_PRESENCE]	= "device_presence",
++};
++
++struct shps_device {
++	struct mutex lock[SHPS_NUM_IRQS];  /* Protects update in shps_dsm_notify_irq() */
++	struct gpio_desc *gpio[SHPS_NUM_IRQS];
++	unsigned int irq[SHPS_NUM_IRQS];
++};
++
++#define SHPS_IRQ_NOT_PRESENT		((unsigned int)-1)
++
++static enum shps_dsm_fn shps_dsm_fn_for_irq(enum shps_irq_type type)
++{
++	return SHPS_DSM_FN_IRQ_BASE_PRESENCE + type;
++}
++
++static void shps_dsm_notify_irq(struct platform_device *pdev, enum shps_irq_type type)
++{
++	struct shps_device *sdev = platform_get_drvdata(pdev);
++	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
++	union acpi_object *result;
++	union acpi_object param;
++	int value;
++
++	mutex_lock(&sdev->lock[type]);
++
++	value = gpiod_get_value_cansleep(sdev->gpio[type]);
++	if (value < 0) {
++		mutex_unlock(&sdev->lock[type]);
++		dev_err(&pdev->dev, "failed to get gpio: %d (irq=%d)\n", type, value);
++		return;
++	}
++
++	dev_dbg(&pdev->dev, "IRQ notification via DSM (irq=%d, value=%d)\n", type, value);
++
++	param.type = ACPI_TYPE_INTEGER;
++	param.integer.value = value;
++
++	result = acpi_evaluate_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION,
++				   shps_dsm_fn_for_irq(type), &param);
++
++	if (!result) {
++		dev_err(&pdev->dev, "IRQ notification via DSM failed (irq=%d, gpio=%d)\n",
++			type, value);
++
++	} else if (result->type != ACPI_TYPE_BUFFER) {
++		dev_err(&pdev->dev,
++			"IRQ notification via DSM failed: unexpected result type (irq=%d, gpio=%d)\n",
++			type, value);
++
++	} else if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
++		dev_err(&pdev->dev,
++			"IRQ notification via DSM failed: unexpected result value (irq=%d, gpio=%d)\n",
++			type, value);
++	}
++
++	mutex_unlock(&sdev->lock[type]);
++
++	if (result)
++		ACPI_FREE(result);
++}
++
++static irqreturn_t shps_handle_irq(int irq, void *data)
++{
++	struct platform_device *pdev = data;
++	struct shps_device *sdev = platform_get_drvdata(pdev);
++	int type;
++
++	/* Figure out which IRQ we're handling. */
++	for (type = 0; type < SHPS_NUM_IRQS; type++)
++		if (irq == sdev->irq[type])
++			break;
++
++	/* We should have found our interrupt, if not: this is a bug. */
++	if (WARN(type >= SHPS_NUM_IRQS, "invalid IRQ number: %d\n", irq))
++		return IRQ_HANDLED;
++
++	/* Forward interrupt to ACPI via DSM. */
++	shps_dsm_notify_irq(pdev, type);
++	return IRQ_HANDLED;
++}
++
++static int shps_setup_irq(struct platform_device *pdev, enum shps_irq_type type)
++{
++	unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
++	struct shps_device *sdev = platform_get_drvdata(pdev);
++	struct gpio_desc *gpiod;
++	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
++	const char *irq_name;
++	const int dsm = shps_dsm_fn_for_irq(type);
++	int status, irq;
++
++	/*
++	 * Only set up interrupts that we actually need: The Surface Book 3
++	 * does not have a DSM for base presence, so don't set up an interrupt
++	 * for that.
++	 */
++	if (!acpi_check_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION, BIT(dsm))) {
++		dev_dbg(&pdev->dev, "IRQ notification via DSM not present (irq=%d)\n", type);
++		return 0;
++	}
++
++	gpiod = devm_gpiod_get(&pdev->dev, shps_gpio_names[type], GPIOD_ASIS);
++	if (IS_ERR(gpiod))
++		return PTR_ERR(gpiod);
++
++	irq = gpiod_to_irq(gpiod);
++	if (irq < 0)
++		return irq;
++
++	irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "shps-irq-%d", type);
++	if (!irq_name)
++		return -ENOMEM;
++
++	status = devm_request_threaded_irq(&pdev->dev, irq, NULL, shps_handle_irq,
++					   flags, irq_name, pdev);
++	if (status)
++		return status;
++
++	dev_dbg(&pdev->dev, "set up irq %d as type %d\n", irq, type);
++
++	sdev->gpio[type] = gpiod;
++	sdev->irq[type] = irq;
++
++	return 0;
++}
++
++static int surface_hotplug_remove(struct platform_device *pdev)
++{
++	struct shps_device *sdev = platform_get_drvdata(pdev);
++	int i;
++
++	/* Ensure that IRQs have been fully handled and won't trigger any more. */
++	for (i = 0; i < SHPS_NUM_IRQS; i++) {
++		if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT)
++			disable_irq(sdev->irq[i]);
++
++		mutex_destroy(&sdev->lock[i]);
++	}
++
++	return 0;
++}
++
++static int surface_hotplug_probe(struct platform_device *pdev)
++{
++	struct shps_device *sdev;
++	int status, i;
++
++	/*
++	 * The MSHW0153 device is also present on the Surface Laptop 3;
++	 * however, that doesn't have a hot-pluggable PCIe device. It also
++	 * doesn't have any GPIO interrupts/pins under the MSHW0153, so filter
++	 * it out here.
++	 */
++	if (gpiod_count(&pdev->dev, NULL) < 0)
++		return -ENODEV;
++
++	status = devm_acpi_dev_add_driver_gpios(&pdev->dev, shps_acpi_gpios);
++	if (status)
++		return status;
++
++	sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
++	if (!sdev)
++		return -ENOMEM;
++
++	platform_set_drvdata(pdev, sdev);
++
++	/*
++	 * Initialize IRQs so that we can safely call surface_hotplug_remove()
++	 * on errors.
++	 */
++	for (i = 0; i < SHPS_NUM_IRQS; i++)
++		sdev->irq[i] = SHPS_IRQ_NOT_PRESENT;
++
++	/* Set up IRQs. */
++	for (i = 0; i < SHPS_NUM_IRQS; i++) {
++		mutex_init(&sdev->lock[i]);
++
++		status = shps_setup_irq(pdev, i);
++		if (status) {
++			dev_err(&pdev->dev, "failed to set up IRQ %d: %d\n", i, status);
++			goto err;
++		}
++	}
++
++	/* Ensure everything is up-to-date. */
++	for (i = 0; i < SHPS_NUM_IRQS; i++)
++		if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT)
++			shps_dsm_notify_irq(pdev, i);
++
++	return 0;
++
++err:
++	surface_hotplug_remove(pdev);
++	return status;
++}
++
++static const struct acpi_device_id surface_hotplug_acpi_match[] = {
++	{ "MSHW0153", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, surface_hotplug_acpi_match);
++
++static struct platform_driver surface_hotplug_driver = {
++	.probe = surface_hotplug_probe,
++	.remove = surface_hotplug_remove,
++	.driver = {
++		.name = "surface_hotplug",
++		.acpi_match_table = surface_hotplug_acpi_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(surface_hotplug_driver);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Surface Hot-Plug Signaling Driver for Surface Book Devices");
++MODULE_LICENSE("GPL");
+-- 
+2.30.1
+
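The interrupt handler above only works out which GPIO line fired and then hands the event to ACPI through shps_dsm_notify_irq(), whose definition appears earlier in this patch and is not part of this excerpt. As a rough, hedged illustration of what such a notification involves, here is a minimal sketch built on the generic ACPI _DSM helpers; the GUID, revision and function index are placeholders standing in for the driver's own shps_dsm_guid, SHPS_DSM_REVISION and per-IRQ function numbers.

    #include <linux/acpi.h>
    #include <linux/bits.h>

    /*
     * Hedged sketch: evaluate _DSM function `func` on `handle` with no package
     * argument and discard the result.  GUID, revision and function index are
     * placeholders for the driver's own definitions.
     */
    static int example_dsm_notify(acpi_handle handle, const guid_t *guid,
                                  u64 revision, u64 func)
    {
        union acpi_object *result;

        /* Bail out if the platform does not implement this function. */
        if (!acpi_check_dsm(handle, guid, revision, BIT(func)))
            return -ENODEV;

        result = acpi_evaluate_dsm(handle, guid, revision, func, NULL);
        if (!result)
            return -EIO;

        /* Only the side effect of the evaluation matters here. */
        ACPI_FREE(result);
        return 0;
    }

shps_setup_irq() above relies on the same acpi_check_dsm() test to decide whether an interrupt is worth requesting at all, which is how the missing base-presence DSM on the Surface Book 3 is skipped.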

+ 233 - 0
patches/5.11/0007-surface-typecover.patch

@@ -0,0 +1,233 @@
+From be2a2310c392ac571f62c5180d1795f132a0f8fd Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jonas=20Dre=C3=9Fler?= <verdre@v0yd.nl>
+Date: Thu, 5 Nov 2020 13:09:45 +0100
+Subject: [PATCH] hid/multitouch: Turn off Type Cover keyboard backlight when
+ suspending
+
+The Type Cover for Microsoft Surface devices supports a special usb
+control request to disable or enable the built-in keyboard backlight.
+On Windows, this request happens when putting the device into suspend or
+resuming it, without it the backlight of the Type Cover will remain
+enabled for some time even though the computer is suspended, which looks
+weird to the user.
+
+So add support for this special usb control request to hid-multitouch,
+which is the driver that's handling the Type Cover.
+
+The reason we have to use a pm_notifier for this instead of the usual
+suspend/resume methods is that those won't get called in case the usb
+device is already autosuspended.
+
+Also, if the device is autosuspended, we have to briefly autoresume it
+in order to send the request. Doing that should be fine, the usb-core
+driver does something similar during suspend inside choose_wakeup().
+
+To make sure we don't send that request to every device but only to
+devices which support it, add a new quirk
+MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER to hid-multitouch. For now this quirk
+is only enabled for the usb id of the Surface Pro 2017 Type Cover, which
+is where I confirmed that it's working.
+
+Patchset: surface-typecover
+---
+ drivers/hid/hid-multitouch.c | 100 ++++++++++++++++++++++++++++++++++-
+ 1 file changed, 98 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 8429ebe7097e..44d48e8bbe1a 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -34,7 +34,10 @@
+ #include <linux/device.h>
+ #include <linux/hid.h>
+ #include <linux/module.h>
++#include <linux/pm_runtime.h>
+ #include <linux/slab.h>
++#include <linux/suspend.h>
++#include <linux/usb.h>
+ #include <linux/input/mt.h>
+ #include <linux/jiffies.h>
+ #include <linux/string.h>
+@@ -47,6 +50,7 @@ MODULE_DESCRIPTION("HID multitouch panels");
+ MODULE_LICENSE("GPL");
+ 
+ #include "hid-ids.h"
++#include "usbhid/usbhid.h"
+ 
+ /* quirks to control the device */
+ #define MT_QUIRK_NOT_SEEN_MEANS_UP	BIT(0)
+@@ -70,12 +74,15 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_WIN8_PTP_BUTTONS	BIT(18)
+ #define MT_QUIRK_SEPARATE_APP_REPORT	BIT(19)
+ #define MT_QUIRK_FORCE_MULTI_INPUT	BIT(20)
++#define MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT	BIT(21)
+ 
+ #define MT_INPUTMODE_TOUCHSCREEN	0x02
+ #define MT_INPUTMODE_TOUCHPAD		0x03
+ 
+ #define MT_BUTTONTYPE_CLICKPAD		0
+ 
++#define MS_TYPE_COVER_FEATURE_REPORT_USAGE	0xff050086
++
+ enum latency_mode {
+ 	HID_LATENCY_NORMAL = 0,
+ 	HID_LATENCY_HIGH = 1,
+@@ -167,6 +174,8 @@ struct mt_device {
+ 
+ 	struct list_head applications;
+ 	struct list_head reports;
++
++	struct notifier_block pm_notifier;
+ };
+ 
+ static void mt_post_parse_default_settings(struct mt_device *td,
+@@ -208,6 +217,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_GOOGLE				0x0111
+ #define MT_CLS_RAZER_BLADE_STEALTH		0x0112
+ #define MT_CLS_SMART_TECH			0x0113
++#define MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER	0x0114
+ 
+ #define MT_DEFAULT_MAXCONTACT	10
+ #define MT_MAX_MAXCONTACT	250
+@@ -367,6 +377,16 @@ static const struct mt_class mt_classes[] = {
+ 			MT_QUIRK_CONTACT_CNT_ACCURATE |
+ 			MT_QUIRK_SEPARATE_APP_REPORT,
+ 	},
++	{ .name = MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER,
++		.quirks = MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT |
++			MT_QUIRK_ALWAYS_VALID |
++			MT_QUIRK_IGNORE_DUPLICATES |
++			MT_QUIRK_HOVERING |
++			MT_QUIRK_CONTACT_CNT_ACCURATE |
++			MT_QUIRK_STICKY_FINGERS |
++			MT_QUIRK_WIN8_PTP_BUTTONS,
++		.export_all_inputs = true
++	},
+ 	{ }
+ };
+ 
+@@ -1674,6 +1694,69 @@ static void mt_expired_timeout(struct timer_list *t)
+ 	clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
+ }
+ 
++static void get_type_cover_backlight_field(struct hid_device *hdev,
++					   struct hid_field **field)
++{
++	struct hid_report_enum *rep_enum;
++	struct hid_report *rep;
++	struct hid_field *cur_field;
++	int i, j;
++
++	rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
++	list_for_each_entry(rep, &rep_enum->report_list, list) {
++		for (i = 0; i < rep->maxfield; i++) {
++			cur_field = rep->field[i];
++
++			for (j = 0; j < cur_field->maxusage; j++) {
++				if (cur_field->usage[j].hid
++				    == MS_TYPE_COVER_FEATURE_REPORT_USAGE) {
++					*field = cur_field;
++					return;
++				}
++			}
++		}
++	}
++}
++
++static void update_keyboard_backlight(struct hid_device *hdev, bool enabled)
++{
++	struct usb_device *udev = hid_to_usb_dev(hdev);
++	struct hid_field *field = NULL;
++
++	/* Wake up the device in case it's already suspended */
++	pm_runtime_get_sync(&udev->dev);
++
++	get_type_cover_backlight_field(hdev, &field);
++	if (!field) {
++		hid_err(hdev, "couldn't find backlight field\n");
++		goto out;
++	}
++
++	field->value[field->index] = enabled ? 0x01ff00ff : 0x00ff00ff;
++	hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT);
++
++out:
++	pm_runtime_put_sync(&udev->dev);
++}
++
++static int mt_pm_notifier(struct notifier_block *notifier,
++			  unsigned long pm_event,
++			  void *unused)
++{
++	struct mt_device *td =
++		container_of(notifier, struct mt_device, pm_notifier);
++	struct hid_device *hdev = td->hdev;
++
++	if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT) {
++		if (pm_event == PM_SUSPEND_PREPARE)
++			update_keyboard_backlight(hdev, 0);
++		else if (pm_event == PM_POST_SUSPEND)
++			update_keyboard_backlight(hdev, 1);
++	}
++
++	return NOTIFY_DONE;
++}
++
+ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ 	int ret, i;
+@@ -1697,6 +1780,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
+ 	hid_set_drvdata(hdev, td);
+ 
++	td->pm_notifier.notifier_call = mt_pm_notifier;
++	register_pm_notifier(&td->pm_notifier);
++
+ 	INIT_LIST_HEAD(&td->applications);
+ 	INIT_LIST_HEAD(&td->reports);
+ 
+@@ -1726,15 +1812,19 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	timer_setup(&td->release_timer, mt_expired_timeout, 0);
+ 
+ 	ret = hid_parse(hdev);
+-	if (ret != 0)
++	if (ret != 0) {
++		unregister_pm_notifier(&td->pm_notifier);
+ 		return ret;
++	}
+ 
+ 	if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID)
+ 		mt_fix_const_fields(hdev, HID_DG_CONTACTID);
+ 
+ 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+-	if (ret)
++	if (ret) {
++		unregister_pm_notifier(&td->pm_notifier);
+ 		return ret;
++	}
+ 
+ 	ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+ 	if (ret)
+@@ -1770,6 +1860,7 @@ static void mt_remove(struct hid_device *hdev)
+ {
+ 	struct mt_device *td = hid_get_drvdata(hdev);
+ 
++	unregister_pm_notifier(&td->pm_notifier);
+ 	del_timer_sync(&td->release_timer);
+ 
+ 	sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
+@@ -2121,6 +2212,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ 			USB_DEVICE_ID_XIROKU_CSR2) },
+ 
++	/* Microsoft Surface type cover */
++	{ .driver_data = MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER,
++		HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
++			USB_VENDOR_ID_MICROSOFT, 0x09c0) },
++
+ 	/* Google MT devices */
+ 	{ .driver_data = MT_CLS_GOOGLE,
+ 		HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+-- 
+2.30.1
+
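The commit message above explains why the driver hooks system suspend through a PM notifier rather than the usual suspend/resume callbacks: the latter are skipped when the USB interface is already runtime-suspended. For readers unfamiliar with that interface, here is a minimal, self-contained sketch of the notifier pattern on its own, with the HID and USB specifics stripped out; all names are illustrative.

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/suspend.h>

    /* Hedged sketch: react to system suspend/resume from a PM notifier. */
    static int example_pm_notifier(struct notifier_block *nb,
                                   unsigned long pm_event, void *unused)
    {
        switch (pm_event) {
        case PM_SUSPEND_PREPARE:        /* system is about to suspend */
            pr_info("example: switch the resource off\n");
            break;
        case PM_POST_SUSPEND:           /* system has resumed */
            pr_info("example: switch the resource back on\n");
            break;
        }

        return NOTIFY_DONE;
    }

    static struct notifier_block example_nb = {
        .notifier_call = example_pm_notifier,
    };

    static int __init example_init(void)
    {
        return register_pm_notifier(&example_nb);
    }

    static void __exit example_exit(void)
    {
        unregister_pm_notifier(&example_nb);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

The patch follows the same shape: it registers the notifier in mt_probe(), and unregisters it in mt_remove() as well as on the new error paths added above.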

+ 53 - 0
patches/5.11/0008-surface-sensors.patch

@@ -0,0 +1,53 @@
+From 85bf81d420abd726680d219c6cc338af1dd3d480 Mon Sep 17 00:00:00 2001
+From: Max Leiter <maxwell.leiter@gmail.com>
+Date: Sat, 19 Dec 2020 17:50:55 -0800
+Subject: [PATCH] iio:light:apds9960 add detection for MSHW0184 ACPI device in
+ apds9960 driver
+
+The device is used in the Microsoft Surface Book 3 and Surface Pro 7
+
+Signed-off-by: Max Leiter <maxwell.leiter@gmail.com>
+Reviewed-by: Matt Ranostay <matt.ranostay@konsulko.com>
+Link: https://lore.kernel.org/r/20201220015057.107246-1-maxwell.leiter@gmail.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Patchset: surface-sensors
+---
+ drivers/iio/light/apds9960.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
+index 547e7f9d6920..df0647856e5d 100644
+--- a/drivers/iio/light/apds9960.c
++++ b/drivers/iio/light/apds9960.c
+@@ -8,6 +8,7 @@
+  * TODO: gesture + proximity calib offsets
+  */
+ 
++#include <linux/acpi.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+@@ -1113,6 +1114,12 @@ static const struct i2c_device_id apds9960_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, apds9960_id);
+ 
++static const struct acpi_device_id apds9960_acpi_match[] = {
++	{ "MSHW0184" },
++	{ }
++};
++MODULE_DEVICE_TABLE(acpi, apds9960_acpi_match);
++
+ static const struct of_device_id apds9960_of_match[] = {
+ 	{ .compatible = "avago,apds9960" },
+ 	{ }
+@@ -1124,6 +1131,7 @@ static struct i2c_driver apds9960_driver = {
+ 		.name	= APDS9960_DRV_NAME,
+ 		.of_match_table = apds9960_of_match,
+ 		.pm	= &apds9960_pm_ops,
++		.acpi_match_table = apds9960_acpi_match,
+ 	},
+ 	.probe		= apds9960_probe,
+ 	.remove		= apds9960_remove,
+-- 
+2.30.1
+

+ 7317 - 0
patches/5.11/0009-cameras.patch

@@ -0,0 +1,7317 @@
+From b6df1f1f4904e1f9a4735c430c11a472fa3c474d Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 10 Oct 2020 23:42:09 +0100
+Subject: [PATCH] software_node: Fix refcounts in
+ software_node_get_next_child()
+
+The software_node_get_next_child() function currently does not hold
+references to the child software_node that it finds or put the ref that
+is held against the old child - fix that.
+
+Fixes: 59abd83672f7 ("drivers: base: Introducing software nodes to the firmware node framework")
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/base/swnode.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 4a4b2008fbc2..4fcc1a6fb724 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -443,14 +443,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
+ 	struct swnode *c = to_swnode(child);
+ 
+ 	if (!p || list_empty(&p->children) ||
+-	    (c && list_is_last(&c->entry, &p->children)))
++	    (c && list_is_last(&c->entry, &p->children))) {
++		fwnode_handle_put(child);
+ 		return NULL;
++	}
+ 
+ 	if (c)
+ 		c = list_next_entry(c, entry);
+ 	else
+ 		c = list_first_entry(&p->children, struct swnode, entry);
+-	return &c->fwnode;
++
++	fwnode_handle_put(child);
++	return fwnode_handle_get(&c->fwnode);
+ }
+ 
+ static struct fwnode_handle *
+-- 
+2.30.1
+
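The fix above brings the software-node backend in line with the reference-counting contract of the other fwnode providers: the child handle passed in is released and the handle returned carries a fresh reference. A consumer-side sketch of that contract, not tied to any particular driver, could look like this:

    #include <linux/property.h>

    /* Hedged sketch: count the children of a fwnode using the get/put contract. */
    static unsigned int example_count_children(struct fwnode_handle *parent)
    {
        struct fwnode_handle *child = NULL;
        unsigned int n = 0;

        /*
         * Each call drops the reference on the child it was handed and returns
         * the next child with a fresh reference, so this loop does not leak
         * handles; the software-node backend only honoured that contract after
         * the fix above.
         */
        while ((child = fwnode_get_next_child_node(parent, child)))
            n++;

        return n;
    }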
+From 9119d068d1f45d793de73f2c791f16f5c36b5458 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 30 Dec 2020 22:44:05 +0200
+Subject: [PATCH] media: ipu3-cio2: Add headers that ipu3-cio2.h is direct user
+ of
+
+Add headers that ipu3-cio2.h is direct user of.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Daniel Scally <djrscally@gmail.com>
+Tested-by: Daniel Scally <djrscally@gmail.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/media/pci/intel/ipu3/ipu3-cio2.h | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+index ccf0b85ae36f..62187ab5ae43 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+@@ -4,8 +4,26 @@
+ #ifndef __IPU3_CIO2_H
+ #define __IPU3_CIO2_H
+ 
++#include <linux/bits.h>
++#include <linux/dma-mapping.h>
++#include <linux/kernel.h>
++#include <linux/mutex.h>
+ #include <linux/types.h>
+ 
++#include <asm/page.h>
++
++#include <media/media-device.h>
++#include <media/media-entity.h>
++#include <media/v4l2-async.h>
++#include <media/v4l2-dev.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-subdev.h>
++#include <media/videobuf2-core.h>
++#include <media/videobuf2-v4l2.h>
++
++struct cio2_fbpt_entry;		/* defined here, after the first usage */
++struct pci_dev;
++
+ #define CIO2_NAME					"ipu3-cio2"
+ #define CIO2_DEVICE_NAME				"Intel IPU3 CIO2"
+ #define CIO2_ENTITY_NAME				"ipu3-csi2"
+-- 
+2.30.1
+
+From 6d388078962c74c27edecdc3f9350c73a4b06be7 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 24 Oct 2020 22:42:28 +0100
+Subject: [PATCH] device property: Return true in fwnode_device_is_available
+ for NULL ops
+
+Some types of fwnode_handle do not implement the device_is_available()
+check, such as those created by software_nodes. There isn't really a
+meaningful way to check for the availability of a device that doesn't
+actually exist, so if the check isn't implemented just assume that the
+"device" is present.
+
+Suggested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/base/property.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index 35b95c6ac0c6..0bf5260f14c6 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -837,9 +837,15 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
+ /**
+  * fwnode_device_is_available - check if a device is available for use
+  * @fwnode: Pointer to the fwnode of the device.
++ *
++ * For fwnode node types that don't implement the .device_is_available()
++ * operation, this function returns true.
+  */
+ bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
+ {
++	if (!fwnode_has_op(fwnode, device_is_available))
++		return true;
++
+ 	return fwnode_call_bool_op(fwnode, device_is_available);
+ }
+ EXPORT_SYMBOL_GPL(fwnode_device_is_available);
+-- 
+2.30.1
+
+From d937d514c28776fc4a0a3f9bbd44aa5bd3650161 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 21 Nov 2020 22:06:38 +0000
+Subject: [PATCH] device property: Call fwnode_graph_get_endpoint_by_id() for
+ fwnode->secondary
+
+This function is used to find fwnode endpoints against a device. In
+some instances those endpoints are software nodes which are children of
+fwnode->secondary. Add support to fwnode_graph_get_endpoint_by_id() to
+find those endpoints by recursively calling itself passing the ptr to
+fwnode->secondary in the event no endpoint is found for the primary.
+
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/base/property.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index 0bf5260f14c6..1421e9548857 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -1215,7 +1215,14 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+ 		best_ep_id = fwnode_ep.id;
+ 	}
+ 
+-	return best_ep;
++	if (best_ep)
++		return best_ep;
++
++	if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
++		return fwnode_graph_get_endpoint_by_id(fwnode->secondary, port,
++						       endpoint, flags);
++
++	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
+ 
+-- 
+2.30.1
+
+From e4f8f968690d7edeee00513081b012fd67e7cce0 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sun, 25 Oct 2020 22:49:08 +0000
+Subject: [PATCH] software_node: Enforce parent before child ordering of nodes
+ arrays
+
+Registering software_nodes with the .parent member set to point to a
+currently unregistered software_node has the potential for problems,
+so enforce parent -> child ordering in arrays passed in to
+software_node_register_nodes().
+
+Software nodes that are children of another software node should be
+unregistered before their parent. To allow easy unregistering of an array
+of software_nodes ordered parent to child, reverse the order in which
+software_node_unregister_nodes() unregisters software_nodes.
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/base/swnode.c | 42 ++++++++++++++++++++++++++++++------------
+ 1 file changed, 30 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 4fcc1a6fb724..166c5cc73f39 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -692,7 +692,11 @@ swnode_register(const struct software_node *node, struct swnode *parent,
+  * software_node_register_nodes - Register an array of software nodes
+  * @nodes: Zero terminated array of software nodes to be registered
+  *
+- * Register multiple software nodes at once.
++ * Register multiple software nodes at once. If any node in the array
++ * has its .parent pointer set (which can only be to another software_node),
++ * then its parent **must** have been registered before it is; either outside
++ * of this function or by ordering the array such that parent comes before
++ * child.
+  */
+ int software_node_register_nodes(const struct software_node *nodes)
+ {
+@@ -700,14 +704,23 @@ int software_node_register_nodes(const struct software_node *nodes)
+ 	int i;
+ 
+ 	for (i = 0; nodes[i].name; i++) {
+-		ret = software_node_register(&nodes[i]);
+-		if (ret) {
+-			software_node_unregister_nodes(nodes);
+-			return ret;
++		const struct software_node *parent = nodes[i].parent;
++
++		if (parent && !software_node_to_swnode(parent)) {
++			ret = -EINVAL;
++			goto err_unregister_nodes;
+ 		}
++
++		ret = software_node_register(&nodes[i]);
++		if (ret)
++			goto err_unregister_nodes;
+ 	}
+ 
+ 	return 0;
++
++err_unregister_nodes:
++	software_node_unregister_nodes(nodes);
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(software_node_register_nodes);
+ 
+@@ -715,18 +728,23 @@ EXPORT_SYMBOL_GPL(software_node_register_nodes);
+  * software_node_unregister_nodes - Unregister an array of software nodes
+  * @nodes: Zero terminated array of software nodes to be unregistered
+  *
+- * Unregister multiple software nodes at once.
++ * Unregister multiple software nodes at once. If parent pointers are set up
++ * in any of the software nodes then the array **must** be ordered such that
++ * parents come before their children.
+  *
+- * NOTE: Be careful using this call if the nodes had parent pointers set up in
+- * them before registering.  If so, it is wiser to remove the nodes
+- * individually, in the correct order (child before parent) instead of relying
+- * on the sequential order of the list of nodes in the array.
++ * NOTE: If you are uncertain whether the array is ordered such that
++ * parents will be unregistered before their children, it is wiser to
++ * remove the nodes individually, in the correct order (child before
++ * parent).
+  */
+ void software_node_unregister_nodes(const struct software_node *nodes)
+ {
+-	int i;
++	unsigned int i = 0;
++
++	while (nodes[i].name)
++		i++;
+ 
+-	for (i = 0; nodes[i].name; i++)
++	while (i--)
+ 		software_node_unregister(&nodes[i]);
+ }
+ EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
+-- 
+2.30.1
+
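With the reworked helpers, arrays that use .parent pointers must list parents before their children, and unregistration now simply walks the same array backwards. A conforming array might look like the following sketch; the node names are hypothetical.

    #include <linux/property.h>

    /*
     * Hedged sketch: the parent is listed before its child, so registration
     * sees the parent first and the backwards unregistration removes the
     * child first.  Node names are hypothetical.
     */
    static const struct software_node example_nodes[] = {
        { .name = "example-parent" },
        { .name = "example-child", .parent = &example_nodes[0] },
        { }     /* zero-terminated, as the helpers expect */
    };

    static int example_register(void)
    {
        return software_node_register_nodes(example_nodes);
    }

    static void example_unregister(void)
    {
        software_node_unregister_nodes(example_nodes);
    }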
+From 0a4f7d6ed8bdfea4dc5dcf3a60ab451c1053fe80 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Wed, 21 Oct 2020 22:25:03 +0100
+Subject: [PATCH] software_node: unregister software_nodes in reverse order
+
+To maintain consistency with software_node_unregister_nodes(), reverse
+the order in which the software_node_unregister_node_group() function
+unregisters nodes.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/base/swnode.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 166c5cc73f39..6f7443c6d3b5 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -779,16 +779,23 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
+  * software_node_unregister_node_group - Unregister a group of software nodes
+  * @node_group: NULL terminated array of software node pointers to be unregistered
+  *
+- * Unregister multiple software nodes at once.
++ * Unregister multiple software nodes at once. The array will be unwound in
++ * reverse order (i.e. last entry first) and thus if any members of the array are
++ * children of another member then the children must appear later in the list such
++ * that they are unregistered first.
+  */
+-void software_node_unregister_node_group(const struct software_node **node_group)
++void software_node_unregister_node_group(
++		const struct software_node **node_group)
+ {
+-	unsigned int i;
++	unsigned int i = 0;
+ 
+ 	if (!node_group)
+ 		return;
+ 
+-	for (i = 0; node_group[i]; i++)
++	while (node_group[i])
++		i++;
++
++	while (i--)
+ 		software_node_unregister(node_group[i]);
+ }
+ EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
+-- 
+2.30.1
+
+From f823cf632a2493e3ed7b9445af8558255b233060 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Tue, 22 Dec 2020 13:09:05 +0000
+Subject: [PATCH] device property: Define format macros for ports and endpoints
+
+OF, ACPI and software_nodes all implement graphs including nodes for ports
+and endpoints. These are all intended to be named with a common schema,
+as "port@n" and "endpoint@n" where n is an unsigned int representing the
+index of the node. To ensure commonality across the subsystems, provide a
+set of macros to define the format.
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ include/linux/fwnode.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
+index fde4ad97564c..77414e431e89 100644
+--- a/include/linux/fwnode.h
++++ b/include/linux/fwnode.h
+@@ -50,6 +50,13 @@ struct fwnode_endpoint {
+ 	const struct fwnode_handle *local_fwnode;
+ };
+ 
++/*
++ * ports and endpoints defined as software_nodes should all follow a common
++ * naming scheme; use these macros to ensure commonality.
++ */
++#define SWNODE_GRAPH_PORT_NAME_FMT		"port@%u"
++#define SWNODE_GRAPH_ENDPOINT_NAME_FMT		"endpoint@%u"
++
+ #define NR_FWNODE_REFERENCE_ARGS	8
+ 
+ /**
+-- 
+2.30.1
+
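The macros are plain printf-style formats; a short sketch of the intended use follows (the buffer sizes match the ones cio2-bridge declares later in this series).

    #include <linux/fwnode.h>
    #include <linux/kernel.h>

    /* Hedged sketch: build "port@0" and "endpoint@0" names with the new macros. */
    static void example_graph_names(void)
    {
        char port_name[7];      /* fits "port@N" for a single-digit index */
        char ep_name[11];       /* fits "endpoint@N" */

        snprintf(port_name, sizeof(port_name), SWNODE_GRAPH_PORT_NAME_FMT, 0U);
        snprintf(ep_name, sizeof(ep_name), SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0U);

        pr_info("%s / %s\n", port_name, ep_name);
    }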
+From 1e9cc0db2370b954e7cf6e365982d6028d48f47e Mon Sep 17 00:00:00 2001
+From: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Date: Tue, 15 Sep 2020 15:47:46 +0100
+Subject: [PATCH] software_node: Add support for fwnode_graph*() family of
+ functions
+
+This implements the remaining .graph_*() callbacks in the fwnode
+operations structure for the software nodes. That makes the
+fwnode_graph_*() functions available in the drivers also when software
+nodes are used.
+
+The implementation tries to mimic the "OF graph" as much as possible, but
+there is no support for the "reg" device property. The ports will need to
+have the index in their  name which starts with "port@" (for example
+"port@0", "port@1", ...) and endpoints will use the index of the software
+node that is given to them during creation. The port nodes can also be
+grouped under a specially named "ports" subnode, just like in DT, if
+necessary.
+
+The remote-endpoints are reference properties under the endpoint nodes
+that are named "remote-endpoint".
+
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Co-developed-by: Daniel Scally <djrscally@gmail.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/base/swnode.c | 115 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 114 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 6f7443c6d3b5..9104a0abd531 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -540,6 +540,115 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
+ 	return 0;
+ }
+ 
++static struct fwnode_handle *
++swnode_graph_find_next_port(const struct fwnode_handle *parent,
++			    struct fwnode_handle *port)
++{
++	struct fwnode_handle *old = port;
++
++	while ((port = software_node_get_next_child(parent, old))) {
++		/*
++		 * fwnode ports have naming style "port@", so we search for any
++		 * children that follow that convention.
++		 */
++		if (!strncmp(to_swnode(port)->node->name, "port@",
++			     strlen("port@")))
++			return port;
++		old = port;
++	}
++
++	return NULL;
++}
++
++static struct fwnode_handle *
++software_node_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
++				      struct fwnode_handle *endpoint)
++{
++	struct swnode *swnode = to_swnode(fwnode);
++	struct fwnode_handle *parent;
++	struct fwnode_handle *port;
++
++	if (!swnode)
++		return NULL;
++
++	if (endpoint) {
++		port = software_node_get_parent(endpoint);
++		parent = software_node_get_parent(port);
++	} else {
++		parent = software_node_get_named_child_node(fwnode, "ports");
++		if (!parent)
++			parent = software_node_get(&swnode->fwnode);
++
++		port = swnode_graph_find_next_port(parent, NULL);
++	}
++
++	for (; port; port = swnode_graph_find_next_port(parent, port)) {
++		endpoint = software_node_get_next_child(port, endpoint);
++		if (endpoint) {
++			fwnode_handle_put(port);
++			break;
++		}
++	}
++
++	fwnode_handle_put(parent);
++
++	return endpoint;
++}
++
++static struct fwnode_handle *
++software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
++{
++	struct swnode *swnode = to_swnode(fwnode);
++	const struct software_node_ref_args *ref;
++	const struct property_entry *prop;
++
++	if (!swnode)
++		return NULL;
++
++	prop = property_entry_get(swnode->node->properties, "remote-endpoint");
++	if (!prop || prop->type != DEV_PROP_REF || prop->is_inline)
++		return NULL;
++
++	ref = prop->pointer;
++
++	return software_node_get(software_node_fwnode(ref[0].node));
++}
++
++static struct fwnode_handle *
++software_node_graph_get_port_parent(struct fwnode_handle *fwnode)
++{
++	struct swnode *swnode = to_swnode(fwnode);
++
++	swnode = swnode->parent;
++	if (swnode && !strcmp(swnode->node->name, "ports"))
++		swnode = swnode->parent;
++
++	return swnode ? software_node_get(&swnode->fwnode) : NULL;
++}
++
++static int
++software_node_graph_parse_endpoint(const struct fwnode_handle *fwnode,
++				   struct fwnode_endpoint *endpoint)
++{
++	struct swnode *swnode = to_swnode(fwnode);
++	const char *parent_name = swnode->parent->node->name;
++	int ret;
++
++	if (strlen("port@") >= strlen(parent_name) ||
++	    strncmp(parent_name, "port@", strlen("port@")))
++		return -EINVAL;
++
++	/* Ports have naming style "port@n", we need to select the n */
++	ret = kstrtou32(parent_name + strlen("port@"), 10, &endpoint->port);
++	if (ret)
++		return ret;
++
++	endpoint->id = swnode->id;
++	endpoint->local_fwnode = fwnode;
++
++	return 0;
++}
++
+ static const struct fwnode_operations software_node_ops = {
+ 	.get = software_node_get,
+ 	.put = software_node_put,
+@@ -551,7 +660,11 @@ static const struct fwnode_operations software_node_ops = {
+ 	.get_parent = software_node_get_parent,
+ 	.get_next_child_node = software_node_get_next_child,
+ 	.get_named_child_node = software_node_get_named_child_node,
+-	.get_reference_args = software_node_get_reference_args
++	.get_reference_args = software_node_get_reference_args,
++	.graph_get_next_endpoint = software_node_graph_get_next_endpoint,
++	.graph_get_remote_endpoint = software_node_graph_get_remote_endpoint,
++	.graph_get_port_parent = software_node_graph_get_port_parent,
++	.graph_parse_endpoint = software_node_graph_parse_endpoint,
+ };
+ 
+ /* -------------------------------------------------------------------------- */
+-- 
+2.30.1
+
+From 076f3a5f8b5208c2601870ac7ed6789ee93bc1b9 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 10 Oct 2020 23:07:22 +0100
+Subject: [PATCH] lib/test_printf.c: Use helper function to unwind array of
+ software_nodes
+
+Use the software_node_unregister_nodes() helper function to unwind this
+array in a cleaner way.
+
+Acked-by: Petr Mladek <pmladek@suse.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ lib/test_printf.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/lib/test_printf.c b/lib/test_printf.c
+index 7ac87f18a10f..7d60f24240a4 100644
+--- a/lib/test_printf.c
++++ b/lib/test_printf.c
+@@ -644,9 +644,7 @@ static void __init fwnode_pointer(void)
+ 	test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
+ 	test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
+ 
+-	software_node_unregister(&softnodes[2]);
+-	software_node_unregister(&softnodes[1]);
+-	software_node_unregister(&softnodes[0]);
++	software_node_unregister_nodes(softnodes);
+ }
+ 
+ static void __init
+-- 
+2.30.1
+
+From f2216eb19d6643cba7876af0d3d7f2fe9193bd16 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 10 Oct 2020 23:11:36 +0100
+Subject: [PATCH] ipu3-cio2: Add T: entry to MAINTAINERS
+
+Development for the ipu3-cio2 driver is taking place in media_tree, but
+there's no T: entry in MAINTAINERS to denote that - rectify that oversight
+
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ MAINTAINERS | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index a4a0519ce88c..66ce274c17d7 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -9010,6 +9010,7 @@ M:	Bingbu Cao <bingbu.cao@intel.com>
+ R:	Tianshu Qiu <tian.shu.qiu@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
++T:	git git://linuxtv.org/media_tree.git
+ F:	Documentation/userspace-api/media/v4l/pixfmt-srggb10-ipu3.rst
+ F:	drivers/media/pci/intel/ipu3/
+ 
+-- 
+2.30.1
+
+From d296a20a7e3452abafc94e1ab070673864f46eae Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 10 Oct 2020 22:47:21 +0100
+Subject: [PATCH] ipu3-cio2: Rename ipu3-cio2.c
+
+ipu3-cio2 driver needs extending with multiple files; rename the main
+source file and specify the renamed file in Makefile to accommodate that.
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/media/pci/intel/ipu3/Makefile                          | 2 ++
+ drivers/media/pci/intel/ipu3/{ipu3-cio2.c => ipu3-cio2-main.c} | 0
+ 2 files changed, 2 insertions(+)
+ rename drivers/media/pci/intel/ipu3/{ipu3-cio2.c => ipu3-cio2-main.c} (100%)
+
+diff --git a/drivers/media/pci/intel/ipu3/Makefile b/drivers/media/pci/intel/ipu3/Makefile
+index 98ddd5beafe0..429d516452e4 100644
+--- a/drivers/media/pci/intel/ipu3/Makefile
++++ b/drivers/media/pci/intel/ipu3/Makefile
+@@ -1,2 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3-cio2.o
++
++ipu3-cio2-y += ipu3-cio2-main.o
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+similarity index 100%
+rename from drivers/media/pci/intel/ipu3/ipu3-cio2.c
+rename to drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+-- 
+2.30.1
+
+From c68d9e1970c22cb653c66d2b5265313bdc57fefb Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Wed, 21 Oct 2020 21:53:05 +0100
+Subject: [PATCH] media: v4l2-core: v4l2-async: Check sd->fwnode->secondary in
+ match_fwnode()
+
+Where the fwnode graph is comprised of software_nodes, these will be
+assigned as the secondary to dev->fwnode. Check the v4l2_subdev's fwnode
+for a secondary and attempt to match against it during match_fwnode() to
+accommodate that possibility.
+
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/media/v4l2-core/v4l2-async.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
+index e3ab003a6c85..9dd896d085ec 100644
+--- a/drivers/media/v4l2-core/v4l2-async.c
++++ b/drivers/media/v4l2-core/v4l2-async.c
+@@ -87,6 +87,14 @@ static bool match_fwnode(struct v4l2_async_notifier *notifier,
+ 	if (sd->fwnode == asd->match.fwnode)
+ 		return true;
+ 
++	/*
++	 * Check the same situation for any possible secondary assigned to the
++	 * subdev's fwnode
++	 */
++	if (!IS_ERR_OR_NULL(sd->fwnode->secondary) &&
++	    sd->fwnode->secondary == asd->match.fwnode)
++		return true;
++
+ 	/*
+ 	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
+ 	 * endpoint or a device. If they're of the same type, there's no match.
+-- 
+2.30.1
+
+From 59a8a1e0fe448a026d2b58d3f253c2d21e40bd44 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sun, 15 Nov 2020 08:15:34 +0000
+Subject: [PATCH] ACPI / bus: Add acpi_dev_get_next_match_dev() and helper
+ macro
+
+To ensure we handle situations in which multiple sensors of the same
+model (and therefore _HID) are present in a system, we need to be able
+to iterate over devices matching a known _HID but unknown _UID and _HRV
+ - add acpi_dev_get_next_match_dev() to accommodate that possibility and
+change acpi_dev_get_first_match_dev() to simply call the new function
+with a NULL starting point. Add an iterator macro for convenience.
+
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/acpi/utils.c    | 30 ++++++++++++++++++++++++++----
+ include/acpi/acpi_bus.h |  7 +++++++
+ 2 files changed, 33 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
+index d5411a166685..ddca1550cce6 100644
+--- a/drivers/acpi/utils.c
++++ b/drivers/acpi/utils.c
+@@ -843,12 +843,13 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
+ EXPORT_SYMBOL(acpi_dev_present);
+ 
+ /**
+- * acpi_dev_get_first_match_dev - Return the first match of ACPI device
++ * acpi_dev_get_next_match_dev - Return the next match of ACPI device
++ * @adev: Pointer to the previous acpi_device matching this @hid, @uid and @hrv
+  * @hid: Hardware ID of the device.
+  * @uid: Unique ID of the device, pass NULL to not check _UID
+  * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+  *
+- * Return the first match of ACPI device if a matching device was present
++ * Return the next match of ACPI device if another matching device was present
+  * at the moment of invocation, or NULL otherwise.
+  *
+  * The caller is responsible to call put_device() on the returned device.
+@@ -856,8 +857,9 @@ EXPORT_SYMBOL(acpi_dev_present);
+  * See additional information in acpi_dev_present() as well.
+  */
+ struct acpi_device *
+-acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
++acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv)
+ {
++	struct device *start = adev ? &adev->dev : NULL;
+ 	struct acpi_dev_match_info match = {};
+ 	struct device *dev;
+ 
+@@ -865,9 +867,29 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
+ 	match.uid = uid;
+ 	match.hrv = hrv;
+ 
+-	dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
++	dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
+ 	return dev ? to_acpi_device(dev) : NULL;
+ }
++EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
++
++/**
++ * acpi_dev_get_first_match_dev - Return the first match of ACPI device
++ * @hid: Hardware ID of the device.
++ * @uid: Unique ID of the device, pass NULL to not check _UID
++ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
++ *
++ * Return the first match of ACPI device if a matching device was present
++ * at the moment of invocation, or NULL otherwise.
++ *
++ * The caller is responsible to call put_device() on the returned device.
++ *
++ * See additional information in acpi_dev_present() as well.
++ */
++struct acpi_device *
++acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
++{
++	return acpi_dev_get_next_match_dev(NULL, hid, uid, hrv);
++}
+ EXPORT_SYMBOL(acpi_dev_get_first_match_dev);
+ 
+ /*
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index 6d1879bf9440..02a716a0af5d 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -683,9 +683,16 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
+ 
+ bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+ 
++struct acpi_device *
++acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
+ struct acpi_device *
+ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
+ 
++#define for_each_acpi_dev_match(adev, hid, uid, hrv)			\
++	for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv);	\
++	     adev;							\
++	     adev = acpi_dev_get_next_match_dev(adev, hid, uid, hrv))
++
+ static inline void acpi_dev_put(struct acpi_device *adev)
+ {
+ 	put_device(&adev->dev);
+-- 
+2.30.1
+
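The new iterator is put to work later in this series by cio2-bridge. A condensed sketch of that usage pattern, with a hypothetical _HID and the reference handling made explicit, could look like this:

    #include <linux/acpi.h>
    #include <linux/kernel.h>

    /*
     * Hedged sketch: collect every ACPI device matching a given _HID, keeping
     * the references the iterator hands out and dropping them once done.  The
     * _HID string and the fixed-size array are illustrative only.
     */
    static void example_enumerate(const char *hid)
    {
        struct acpi_device *found[4];
        struct acpi_device *adev;
        unsigned int n = 0;

        for_each_acpi_dev_match(adev, hid, NULL, -1) {
            if (n >= ARRAY_SIZE(found)) {
                acpi_dev_put(adev);     /* drop the extra reference */
                break;
            }
            found[n++] = adev;          /* keep the reference until later */
        }

        /* ... inspect found[0 .. n - 1] here ... */

        while (n--)
            acpi_dev_put(found[n]);
    }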
+From bfdb7fa362ffe0e50e5f90bb9d39a2ab61c1fd02 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 19 Dec 2020 23:55:04 +0000
+Subject: [PATCH] media: v4l2-fwnode: Include v4l2_fwnode_bus_type
+
+V4L2 fwnode bus types are enumerated in v4l2-fwnode.c, meaning they aren't
+available to the rest of the kernel. Move the enum to the corresponding
+header so that I can use the label to refer to those values.
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/media/v4l2-core/v4l2-fwnode.c | 11 -----------
+ include/media/v4l2-fwnode.h           | 22 ++++++++++++++++++++++
+ 2 files changed, 22 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
+index 5353e37eb950..c1c2b3060532 100644
+--- a/drivers/media/v4l2-core/v4l2-fwnode.c
++++ b/drivers/media/v4l2-core/v4l2-fwnode.c
+@@ -28,17 +28,6 @@
+ #include <media/v4l2-fwnode.h>
+ #include <media/v4l2-subdev.h>
+ 
+-enum v4l2_fwnode_bus_type {
+-	V4L2_FWNODE_BUS_TYPE_GUESS = 0,
+-	V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
+-	V4L2_FWNODE_BUS_TYPE_CSI1,
+-	V4L2_FWNODE_BUS_TYPE_CCP2,
+-	V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
+-	V4L2_FWNODE_BUS_TYPE_PARALLEL,
+-	V4L2_FWNODE_BUS_TYPE_BT656,
+-	NR_OF_V4L2_FWNODE_BUS_TYPE,
+-};
+-
+ static const struct v4l2_fwnode_bus_conv {
+ 	enum v4l2_fwnode_bus_type fwnode_bus_type;
+ 	enum v4l2_mbus_type mbus_type;
+diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
+index 4365430eea6f..77fd6a3ec308 100644
+--- a/include/media/v4l2-fwnode.h
++++ b/include/media/v4l2-fwnode.h
+@@ -213,6 +213,28 @@ struct v4l2_fwnode_connector {
+ 	} connector;
+ };
+ 
++/**
++ * enum v4l2_fwnode_bus_type - Video bus types defined by firmware properties
++ * @V4L2_FWNODE_BUS_TYPE_GUESS: Default value if no bus-type fwnode property
++ * @V4L2_FWNODE_BUS_TYPE_CSI2_CPHY: MIPI CSI-2 bus, C-PHY physical layer
++ * @V4L2_FWNODE_BUS_TYPE_CSI1: MIPI CSI-1 bus
++ * @V4L2_FWNODE_BUS_TYPE_CCP2: SMIA Compact Camera Port 2 bus
++ * @V4L2_FWNODE_BUS_TYPE_CSI2_DPHY: MIPI CSI-2 bus, D-PHY physical layer
++ * @V4L2_FWNODE_BUS_TYPE_PARALLEL: Camera Parallel Interface bus
++ * @V4L2_FWNODE_BUS_TYPE_BT656: BT.656 video format bus-type
++ * @NR_OF_V4L2_FWNODE_BUS_TYPE: Number of bus-types
++ */
++enum v4l2_fwnode_bus_type {
++	V4L2_FWNODE_BUS_TYPE_GUESS = 0,
++	V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
++	V4L2_FWNODE_BUS_TYPE_CSI1,
++	V4L2_FWNODE_BUS_TYPE_CCP2,
++	V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
++	V4L2_FWNODE_BUS_TYPE_PARALLEL,
++	V4L2_FWNODE_BUS_TYPE_BT656,
++	NR_OF_V4L2_FWNODE_BUS_TYPE
++};
++
+ /**
+  * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties
+  * @fwnode: pointer to the endpoint's fwnode handle
+-- 
+2.30.1
+
+From 2ecacb9cd48ca324e4d74eeb0ce4d3128cd43710 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Wed, 21 Oct 2020 21:53:44 +0100
+Subject: [PATCH] ipu3-cio2: Add cio2-bridge to ipu3-cio2 driver
+
+Currently on platforms designed for Windows, connections between CIO2 and
+sensors are not properly defined in DSDT. This patch extends the ipu3-cio2
+driver to compensate by building software_node connections, parsing the
+connection properties from the sensor's SSDB buffer.
+
+Suggested-by: Jordan Hand <jorhand@linux.microsoft.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ MAINTAINERS                                   |   1 +
+ drivers/media/pci/intel/ipu3/Kconfig          |  18 +
+ drivers/media/pci/intel/ipu3/Makefile         |   1 +
+ drivers/media/pci/intel/ipu3/cio2-bridge.c    | 311 ++++++++++++++++++
+ drivers/media/pci/intel/ipu3/cio2-bridge.h    | 125 +++++++
+ drivers/media/pci/intel/ipu3/ipu3-cio2-main.c |  34 ++
+ drivers/media/pci/intel/ipu3/ipu3-cio2.h      |   6 +
+ 7 files changed, 496 insertions(+)
+ create mode 100644 drivers/media/pci/intel/ipu3/cio2-bridge.c
+ create mode 100644 drivers/media/pci/intel/ipu3/cio2-bridge.h
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 66ce274c17d7..fb99543648d3 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -9007,6 +9007,7 @@ INTEL IPU3 CSI-2 CIO2 DRIVER
+ M:	Yong Zhi <yong.zhi@intel.com>
+ M:	Sakari Ailus <sakari.ailus@linux.intel.com>
+ M:	Bingbu Cao <bingbu.cao@intel.com>
++M:	Dan Scally <djrscally@gmail.com>
+ R:	Tianshu Qiu <tian.shu.qiu@intel.com>
+ L:	linux-media@vger.kernel.org
+ S:	Maintained
+diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
+index 82d7f17e6a02..96a2231b16ad 100644
+--- a/drivers/media/pci/intel/ipu3/Kconfig
++++ b/drivers/media/pci/intel/ipu3/Kconfig
+@@ -16,3 +16,21 @@ config VIDEO_IPU3_CIO2
+ 	  Say Y or M here if you have a Skylake/Kaby Lake SoC with MIPI CSI-2
+ 	  connected camera.
+ 	  The module will be called ipu3-cio2.
++
++config CIO2_BRIDGE
++	bool "IPU3 CIO2 Sensors Bridge"
++	depends on VIDEO_IPU3_CIO2
++	help
++	  This extension provides an API for the ipu3-cio2 driver to create
++	  connections to cameras that are hidden in the SSDB buffer in ACPI.
++	  It can be used to enable support for cameras in detachable / hybrid
++	  devices that ship with Windows.
++
++	  Say Y here if your device is a detachable / hybrid laptop that comes
++	  with Windows installed by the OEM, for example:
++
++		- Microsoft Surface models (except Surface Pro 3)
++		- The Lenovo Miix line (for example the 510, 520, 710 and 720)
++		- Dell 7285
++
++	  If in doubt, say N here.
+diff --git a/drivers/media/pci/intel/ipu3/Makefile b/drivers/media/pci/intel/ipu3/Makefile
+index 429d516452e4..933777e6ea8a 100644
+--- a/drivers/media/pci/intel/ipu3/Makefile
++++ b/drivers/media/pci/intel/ipu3/Makefile
+@@ -2,3 +2,4 @@
+ obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3-cio2.o
+ 
+ ipu3-cio2-y += ipu3-cio2-main.o
++ipu3-cio2-$(CONFIG_CIO2_BRIDGE) += cio2-bridge.o
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+new file mode 100644
+index 000000000000..143f3c0f445e
+--- /dev/null
++++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+@@ -0,0 +1,311 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Author: Dan Scally <djrscally@gmail.com> */
++
++#include <linux/acpi.h>
++#include <linux/device.h>
++#include <linux/pci.h>
++#include <linux/property.h>
++#include <media/v4l2-fwnode.h>
++
++#include "cio2-bridge.h"
++
++/*
++ * Extend this array with ACPI Hardware IDs of devices known to be working
++ * plus the number of link-frequencies expected by their drivers, along with
++ * the frequency values in hertz. This is somewhat opportunistic way of adding
++ * support for this for now in the hopes of a better source for the information
++ * (possibly some encoded value in the SSDB buffer that we're unaware of)
++ * becoming apparent in the future.
++ *
++ * Do not add an entry for a sensor that is not actually supported.
++ */
++static const struct cio2_sensor_config cio2_supported_sensors[] = {
++	/* Omnivision OV5693 */
++	CIO2_SENSOR_CONFIG("INT33BE", 0),
++	/* Omnivision OV2680 */
++	CIO2_SENSOR_CONFIG("OVTI2680", 0),
++};
++
++static const struct cio2_property_names prop_names = {
++	.clock_frequency = "clock-frequency",
++	.rotation = "rotation",
++	.bus_type = "bus-type",
++	.data_lanes = "data-lanes",
++	.remote_endpoint = "remote-endpoint",
++	.link_frequencies = "link-frequencies",
++};
++
++static int cio2_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
++					void *data, u32 size)
++{
++	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
++	union acpi_object *obj;
++	acpi_status status;
++	int ret = 0;
++
++	status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
++	if (ACPI_FAILURE(status))
++		return -ENODEV;
++
++	obj = buffer.pointer;
++	if (!obj) {
++		dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
++		return -ENODEV;
++	}
++
++	if (obj->type != ACPI_TYPE_BUFFER) {
++		dev_err(&adev->dev, "Not an ACPI buffer\n");
++		ret = -ENODEV;
++		goto out_free_buff;
++	}
++
++	if (obj->buffer.length > size) {
++		dev_err(&adev->dev, "Given buffer is too small\n");
++		ret = -EINVAL;
++		goto out_free_buff;
++	}
++
++	memcpy(data, obj->buffer.pointer, obj->buffer.length);
++
++out_free_buff:
++	kfree(buffer.pointer);
++	return ret;
++}
++
++static void cio2_bridge_create_fwnode_properties(
++	struct cio2_sensor *sensor,
++	struct cio2_bridge *bridge,
++	const struct cio2_sensor_config *cfg)
++{
++	sensor->prop_names = prop_names;
++
++	sensor->local_ref[0].node = &sensor->swnodes[SWNODE_CIO2_ENDPOINT];
++	sensor->remote_ref[0].node = &sensor->swnodes[SWNODE_SENSOR_ENDPOINT];
++
++	sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
++					sensor->prop_names.clock_frequency,
++					sensor->ssdb.mclkspeed);
++	sensor->dev_properties[1] = PROPERTY_ENTRY_U8(
++					sensor->prop_names.rotation,
++					sensor->ssdb.degree);
++
++	sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
++					sensor->prop_names.bus_type,
++					V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
++	sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(
++					sensor->prop_names.data_lanes,
++					bridge->data_lanes,
++					sensor->ssdb.lanes);
++	sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(
++					sensor->prop_names.remote_endpoint,
++					sensor->local_ref);
++
++	if (cfg->nr_link_freqs > 0)
++		sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(
++						sensor->prop_names.link_frequencies,
++						cfg->link_freqs,
++						cfg->nr_link_freqs);
++
++	sensor->cio2_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(
++					sensor->prop_names.data_lanes,
++					bridge->data_lanes,
++					sensor->ssdb.lanes);
++	sensor->cio2_properties[1] = PROPERTY_ENTRY_REF_ARRAY(
++					sensor->prop_names.remote_endpoint,
++					sensor->remote_ref);
++}
++
++static void cio2_bridge_init_swnode_names(struct cio2_sensor *sensor)
++{
++	snprintf(sensor->node_names.remote_port,
++		 sizeof(sensor->node_names.remote_port),
++		 SWNODE_GRAPH_PORT_NAME_FMT, sensor->ssdb.link);
++	snprintf(sensor->node_names.port,
++		 sizeof(sensor->node_names.port),
++		 SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
++	snprintf(sensor->node_names.endpoint,
++		 sizeof(sensor->node_names.endpoint),
++		 SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
++}
++
++static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge,
++						  struct cio2_sensor *sensor)
++{
++	struct software_node *nodes = sensor->swnodes;
++
++	cio2_bridge_init_swnode_names(sensor);
++
++	nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->name,
++					       sensor->dev_properties);
++	nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
++					      &nodes[SWNODE_SENSOR_HID]);
++	nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(
++						sensor->node_names.endpoint,
++						&nodes[SWNODE_SENSOR_PORT],
++						sensor->ep_properties);
++	nodes[SWNODE_CIO2_PORT] = NODE_PORT(sensor->node_names.remote_port,
++					    &bridge->cio2_hid_node);
++	nodes[SWNODE_CIO2_ENDPOINT] = NODE_ENDPOINT(
++						sensor->node_names.endpoint,
++						&nodes[SWNODE_CIO2_PORT],
++						sensor->cio2_properties);
++}
++
++static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
++{
++	struct cio2_sensor *sensor;
++	unsigned int i;
++
++	for (i = 0; i < bridge->n_sensors; i++) {
++		sensor = &bridge->sensors[i];
++		software_node_unregister_nodes(sensor->swnodes);
++		acpi_dev_put(sensor->adev);
++	}
++}
++
++static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
++				      struct cio2_bridge *bridge,
++				      struct pci_dev *cio2)
++{
++	struct fwnode_handle *fwnode;
++	struct cio2_sensor *sensor;
++	struct acpi_device *adev;
++	int ret;
++
++	for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
++		if (!adev->status.enabled)
++			continue;
++
++		if (bridge->n_sensors >= CIO2_NUM_PORTS) {
++			dev_err(&cio2->dev, "Exceeded available CIO2 ports\n");
++			cio2_bridge_unregister_sensors(bridge);
++			ret = -EINVAL;
++			goto err_out;
++		}
++
++		sensor = &bridge->sensors[bridge->n_sensors];
++		sensor->adev = adev;
++		strscpy(sensor->name, cfg->hid, sizeof(sensor->name));
++
++		ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",
++						   &sensor->ssdb,
++						   sizeof(sensor->ssdb));
++		if (ret)
++			goto err_put_adev;
++
++		if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
++			dev_err(&adev->dev,
++				"Number of lanes in SSDB is invalid\n");
++			ret = -EINVAL;
++			goto err_put_adev;
++		}
++
++		cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
++		cio2_bridge_create_connection_swnodes(bridge, sensor);
++
++		ret = software_node_register_nodes(sensor->swnodes);
++		if (ret)
++			goto err_put_adev;
++
++		fwnode = software_node_fwnode(&sensor->swnodes[SWNODE_SENSOR_HID]);
++		if (!fwnode) {
++			ret = -ENODEV;
++			goto err_free_swnodes;
++		}
++
++		adev->fwnode.secondary = fwnode;
++
++		dev_info(&cio2->dev, "Found supported sensor %s\n",
++			 acpi_dev_name(adev));
++
++		bridge->n_sensors++;
++	}
++
++	return 0;
++
++err_free_swnodes:
++	software_node_unregister_nodes(sensor->swnodes);
++err_put_adev:
++	acpi_dev_put(sensor->adev);
++err_out:
++	return ret;
++}
++
++static int cio2_bridge_connect_sensors(struct cio2_bridge *bridge,
++				       struct pci_dev *cio2)
++{
++	unsigned int i;
++	int ret;
++
++	for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) {
++		const struct cio2_sensor_config *cfg = &cio2_supported_sensors[i];
++
++		ret = cio2_bridge_connect_sensor(cfg, bridge, cio2);
++		if (ret)
++			goto err_unregister_sensors;
++	}
++
++	return 0;
++
++err_unregister_sensors:
++	cio2_bridge_unregister_sensors(bridge);
++	return ret;
++}
++
++int cio2_bridge_init(struct pci_dev *cio2)
++{
++	struct device *dev = &cio2->dev;
++	struct fwnode_handle *fwnode;
++	struct cio2_bridge *bridge;
++	unsigned int i;
++	int ret;
++
++	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
++	if (!bridge)
++		return -ENOMEM;
++
++	strscpy(bridge->cio2_node_name, CIO2_HID, sizeof(bridge->cio2_node_name));
++	bridge->cio2_hid_node.name = bridge->cio2_node_name;
++
++	ret = software_node_register(&bridge->cio2_hid_node);
++	if (ret < 0) {
++		dev_err(dev, "Failed to register the CIO2 HID node\n");
++		goto err_free_bridge;
++	}
++
++	/*
++	 * Map the lane arrangement, which is fixed for the IPU3 (meaning we
++	 * only need one, rather than one per sensor). We include it as a
++	 * member of the struct cio2_bridge rather than a global variable so
++	 * that it survives if the module is unloaded along with the rest of
++	 * the struct.
++	 */
++	for (i = 0; i < CIO2_MAX_LANES; i++)
++		bridge->data_lanes[i] = i + 1;
++
++	ret = cio2_bridge_connect_sensors(bridge, cio2);
++	if (ret || bridge->n_sensors == 0)
++		goto err_unregister_cio2;
++
++	dev_info(dev, "Connected %d cameras\n", bridge->n_sensors);
++
++	fwnode = software_node_fwnode(&bridge->cio2_hid_node);
++	if (!fwnode) {
++		dev_err(dev, "Error getting fwnode from cio2 software_node\n");
++		ret = -ENODEV;
++		goto err_unregister_sensors;
++	}
++
++	set_secondary_fwnode(dev, fwnode);
++
++	return 0;
++
++err_unregister_sensors:
++	cio2_bridge_unregister_sensors(bridge);
++err_unregister_cio2:
++	software_node_unregister(&bridge->cio2_hid_node);
++err_free_bridge:
++	kfree(bridge);
++
++	return ret;
++}
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
+new file mode 100644
+index 000000000000..dd0ffcafa489
+--- /dev/null
++++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
+@@ -0,0 +1,125 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Author: Dan Scally <djrscally@gmail.com> */
++#ifndef __CIO2_BRIDGE_H
++#define __CIO2_BRIDGE_H
++
++#include <linux/property.h>
++#include <linux/types.h>
++
++#include "ipu3-cio2.h"
++
++#define CIO2_HID				"INT343E"
++#define CIO2_MAX_LANES				4
++#define MAX_NUM_LINK_FREQS			3
++
++#define CIO2_SENSOR_CONFIG(_HID, _NR, ...)	\
++	(const struct cio2_sensor_config) {	\
++		.hid = _HID,			\
++		.nr_link_freqs = _NR,		\
++		.link_freqs = { __VA_ARGS__ }	\
++	}
++
++#define NODE_SENSOR(_HID, _PROPS)		\
++	(const struct software_node) {		\
++		.name = _HID,			\
++		.properties = _PROPS,		\
++	}
++
++#define NODE_PORT(_PORT, _SENSOR_NODE)		\
++	(const struct software_node) {		\
++		.name = _PORT,			\
++		.parent = _SENSOR_NODE,		\
++	}
++
++#define NODE_ENDPOINT(_EP, _PORT, _PROPS)	\
++	(const struct software_node) {		\
++		.name = _EP,			\
++		.parent = _PORT,		\
++		.properties = _PROPS,		\
++	}
++
++enum cio2_sensor_swnodes {
++	SWNODE_SENSOR_HID,
++	SWNODE_SENSOR_PORT,
++	SWNODE_SENSOR_ENDPOINT,
++	SWNODE_CIO2_PORT,
++	SWNODE_CIO2_ENDPOINT,
++	SWNODE_COUNT
++};
++
++/* Data representation as it is in ACPI SSDB buffer */
++struct cio2_sensor_ssdb {
++	u8 version;
++	u8 sku;
++	u8 guid_csi2[16];
++	u8 devfunction;
++	u8 bus;
++	u32 dphylinkenfuses;
++	u32 clockdiv;
++	u8 link;
++	u8 lanes;
++	u32 csiparams[10];
++	u32 maxlanespeed;
++	u8 sensorcalibfileidx;
++	u8 sensorcalibfileidxInMBZ[3];
++	u8 romtype;
++	u8 vcmtype;
++	u8 platforminfo;
++	u8 platformsubinfo;
++	u8 flash;
++	u8 privacyled;
++	u8 degree;
++	u8 mipilinkdefined;
++	u32 mclkspeed;
++	u8 controllogicid;
++	u8 reserved1[3];
++	u8 mclkport;
++	u8 reserved2[13];
++} __packed;
++
++struct cio2_property_names {
++	char clock_frequency[16];
++	char rotation[9];
++	char bus_type[9];
++	char data_lanes[11];
++	char remote_endpoint[16];
++	char link_frequencies[17];
++};
++
++struct cio2_node_names {
++	char port[7];
++	char endpoint[11];
++	char remote_port[7];
++};
++
++struct cio2_sensor_config {
++	const char *hid;
++	const u8 nr_link_freqs;
++	const u64 link_freqs[MAX_NUM_LINK_FREQS];
++};
++
++struct cio2_sensor {
++	char name[ACPI_ID_LEN];
++	struct acpi_device *adev;
++
++	struct software_node swnodes[6];
++	struct cio2_node_names node_names;
++
++	struct cio2_sensor_ssdb ssdb;
++	struct cio2_property_names prop_names;
++	struct property_entry ep_properties[5];
++	struct property_entry dev_properties[3];
++	struct property_entry cio2_properties[3];
++	struct software_node_ref_args local_ref[1];
++	struct software_node_ref_args remote_ref[1];
++};
++
++struct cio2_bridge {
++	char cio2_node_name[ACPI_ID_LEN];
++	struct software_node cio2_hid_node;
++	u32 data_lanes[4];
++	unsigned int n_sensors;
++	struct cio2_sensor sensors[CIO2_NUM_PORTS];
++};
++
++#endif
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+index 6cada8a6e50c..c830c12babd9 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+@@ -1702,11 +1702,28 @@ static void cio2_queues_exit(struct cio2_device *cio2)
+ 		cio2_queue_exit(cio2, &cio2->queue[i]);
+ }
+ 
++static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
++{
++	struct fwnode_handle *endpoint;
++
++	if (IS_ERR_OR_NULL(fwnode))
++		return -EINVAL;
++
++	endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
++	if (endpoint) {
++		fwnode_handle_put(endpoint);
++		return 0;
++	}
++
++	return cio2_check_fwnode_graph(fwnode->secondary);
++}
++
+ /**************** PCI interface ****************/
+ 
+ static int cio2_pci_probe(struct pci_dev *pci_dev,
+ 			  const struct pci_device_id *id)
+ {
++	struct fwnode_handle *fwnode = dev_fwnode(&pci_dev->dev);
+ 	struct cio2_device *cio2;
+ 	int r;
+ 
+@@ -1715,6 +1732,23 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
+ 		return -ENOMEM;
+ 	cio2->pci_dev = pci_dev;
+ 
++	/*
++	 * On some platforms no connections to sensors are defined in firmware;
++	 * if the device has no endpoints then we can try to build those as
++	 * software_nodes parsed from SSDB.
++	 */
++	r = cio2_check_fwnode_graph(fwnode);
++	if (r) {
++		if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
++			dev_err(&pci_dev->dev, "fwnode graph has no endpoints connected\n");
++			return -EINVAL;
++		}
++
++		r = cio2_bridge_init(pci_dev);
++		if (r)
++			return r;
++	}
++
+ 	r = pcim_enable_device(pci_dev);
+ 	if (r) {
+ 		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+index 62187ab5ae43..dc3e343a37fb 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+@@ -455,4 +455,10 @@ static inline struct cio2_queue *vb2q_to_cio2_queue(struct vb2_queue *vq)
+ 	return container_of(vq, struct cio2_queue, vbq);
+ }
+ 
++#if IS_ENABLED(CONFIG_CIO2_BRIDGE)
++int cio2_bridge_init(struct pci_dev *cio2);
++#else
+static inline int cio2_bridge_init(struct pci_dev *cio2) { return 0; }
++#endif
++
+ #endif
+-- 
+2.30.1
+
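+For reference, the probe-time check added above only falls back to
+cio2_bridge_init() when neither the device's primary fwnode nor its secondary
+carries any graph endpoints. A minimal, illustrative sketch of that pattern
+(the helper name has_graph_endpoint() is ours, not part of the patch):
+
+    #include <linux/err.h>
+    #include <linux/fwnode.h>
+    #include <linux/property.h>
+
+    /* Illustrative helper mirroring cio2_check_fwnode_graph() above. */
+    static bool has_graph_endpoint(struct fwnode_handle *fwnode)
+    {
+            struct fwnode_handle *endpoint;
+
+            if (IS_ERR_OR_NULL(fwnode))
+                    return false;
+
+            endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
+            if (endpoint) {
+                    fwnode_handle_put(endpoint);    /* drop the acquired reference */
+                    return true;
+            }
+
+            /* Software nodes built by cio2-bridge hang off the secondary. */
+            return has_graph_endpoint(fwnode->secondary);
+    }
+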
+From b3f74699bf73617b5dca455e08005e5150b29d89 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Wed, 2 Dec 2020 12:38:10 +0000
+Subject: [PATCH] acpi: utils: move acpi_lpss_dep() to utils
+
+I need to be able to identify devices which declare themselves to be
+dependent on other devices through _DEP; move this function to utils.c
+and make it available to the rest of the ACPI layer.
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/acpi/acpi_lpss.c | 24 ------------------------
+ drivers/acpi/internal.h  |  1 +
+ drivers/acpi/utils.c     | 24 ++++++++++++++++++++++++
+ 3 files changed, 25 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index be73974ce449..70c7d9a3f715 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -543,30 +543,6 @@ static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
+ 	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
+ }
+ 
+-static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+-{
+-	struct acpi_handle_list dep_devices;
+-	acpi_status status;
+-	int i;
+-
+-	if (!acpi_has_method(adev->handle, "_DEP"))
+-		return false;
+-
+-	status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
+-					 &dep_devices);
+-	if (ACPI_FAILURE(status)) {
+-		dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
+-		return false;
+-	}
+-
+-	for (i = 0; i < dep_devices.count; i++) {
+-		if (dep_devices.handles[i] == handle)
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+ static void acpi_lpss_link_consumer(struct device *dev1,
+ 				    const struct lpss_device_links *link)
+ {
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index e6a5d997241c..68fc6a03aed1 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -79,6 +79,7 @@ static inline void acpi_lpss_init(void) {}
+ #endif
+ 
+ void acpi_apd_init(void);
++bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle);
+ 
+ acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
+ bool acpi_queue_hotplug_work(struct work_struct *work);
+diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
+index ddca1550cce6..78b38775f18b 100644
+--- a/drivers/acpi/utils.c
++++ b/drivers/acpi/utils.c
+@@ -807,6 +807,30 @@ static int acpi_dev_match_cb(struct device *dev, const void *data)
+ 	return hrv == match->hrv;
+ }
+ 
++bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
++{
++	struct acpi_handle_list dep_devices;
++	acpi_status status;
++	int i;
++
++	if (!acpi_has_method(adev->handle, "_DEP"))
++		return false;
++
++	status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
++					 &dep_devices);
++	if (ACPI_FAILURE(status)) {
++		dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
++		return false;
++	}
++
++	for (i = 0; i < dep_devices.count; i++) {
++		if (dep_devices.handles[i] == handle)
++			return true;
++	}
++
++	return false;
++}
++
+ /**
+  * acpi_dev_present - Detect that a given ACPI device is present
+  * @hid: Hardware ID of the device.
+-- 
+2.30.1
+
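+With the function now living in utils.c and declared in internal.h, other
+parts of the ACPI core can reuse it. A minimal sketch, purely illustrative
+(the wrapper name is made up):
+
+    #include <linux/acpi.h>
+
+    #include "internal.h"       /* acpi_lpss_dep() */
+
+    /* Illustrative: does @consumer list @supplier in its _DEP package? */
+    static bool consumer_lists_supplier(struct acpi_device *consumer,
+                                        struct acpi_device *supplier)
+    {
+            return acpi_lpss_dep(consumer, supplier->handle);
+    }
+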
+From c8a8199d91263824437e9d704738e4f802218d92 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Thu, 26 Nov 2020 21:12:41 +0000
+Subject: [PATCH] acpi: utils: Add function to fetch dependent acpi_devices
+
+In some ACPI tables we encounter, devices use the _DEP method to assert
+a dependence on other ACPI devices as opposed to the OpRegions that the
+specification intends. We need to be able to find those devices "from"
+the dependee, so add a function to parse all ACPI devices and check whether
+they include the handle of the dependee device in their _DEP buffer.
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/acpi/utils.c    | 34 ++++++++++++++++++++++++++++++++++
+ include/acpi/acpi_bus.h |  2 ++
+ 2 files changed, 36 insertions(+)
+
+diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
+index 78b38775f18b..ec6a2406a886 100644
+--- a/drivers/acpi/utils.c
++++ b/drivers/acpi/utils.c
+@@ -831,6 +831,18 @@ bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+ 	return false;
+ }
+ 
++static int acpi_dev_match_by_dep(struct device *dev, const void *data)
++{
++	struct acpi_device *adev = to_acpi_device(dev);
++	const struct acpi_device *dependee = data;
++	acpi_handle handle = dependee->handle;
++
++	if (acpi_lpss_dep(adev, handle))
++		return 1;
++
++	return 0;
++}
++
+ /**
+  * acpi_dev_present - Detect that a given ACPI device is present
+  * @hid: Hardware ID of the device.
+@@ -866,6 +878,28 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
+ }
+ EXPORT_SYMBOL(acpi_dev_present);
+ 
++/**
++ * acpi_dev_get_next_dep_dev - Return next ACPI device dependent on input dev
++ * @adev: Pointer to the dependee device
++ * @prev: Pointer to the previous dependent device (or NULL for first match)
++ *
++ * Return the next ACPI device which declares itself dependent on @adev in
++ * the _DEP buffer.
++ *
++ * The caller is responsible for calling put_device() on the returned device.
++ */
++struct acpi_device *acpi_dev_get_next_dep_dev(struct acpi_device *adev,
++					      struct acpi_device *prev)
++{
++	struct device *start = prev ? &prev->dev : NULL;
++	struct device *dev;
++
++	dev = bus_find_device(&acpi_bus_type, start, adev, acpi_dev_match_by_dep);
++
++	return dev ? to_acpi_device(dev) : NULL;
++}
++EXPORT_SYMBOL(acpi_dev_get_next_dep_dev);
++
+ /**
+  * acpi_dev_get_next_match_dev - Return the next match of ACPI device
+  * @adev: Pointer to the previous acpi_device matching this @hid, @uid and @hrv
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index 02a716a0af5d..33deb22294f2 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -683,6 +683,8 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
+ 
+ bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+ 
++struct acpi_device *
++acpi_dev_get_next_dep_dev(struct acpi_device *adev, struct acpi_device *prev);
+ struct acpi_device *
+ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
+ struct acpi_device *
+-- 
+2.30.1
+
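+A minimal sketch of iterating every dependent of a device with the new helper,
+following the reference-counting rule from its kernel-doc; the function name
+and the log message are illustrative only:
+
+    #include <linux/acpi.h>
+
+    /* Illustrative: walk every ACPI device that names @adev in its _DEP. */
+    static void walk_dependents(struct acpi_device *adev)
+    {
+            struct acpi_device *prev = NULL, *dep;
+
+            while ((dep = acpi_dev_get_next_dep_dev(adev, prev))) {
+                    dev_info(&dep->dev, "declares a _DEP on %s\n",
+                             acpi_dev_name(adev));
+
+                    if (prev)
+                            acpi_dev_put(prev);     /* drop the previous cursor */
+                    prev = dep;
+            }
+
+            if (prev)
+                    acpi_dev_put(prev);
+    }
+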
+From d18c420c43a3ddab897837fe408766ff3599e1b6 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Mon, 16 Nov 2020 21:38:49 +0000
+Subject: [PATCH] i2c: i2c-core-base: Use format macro in i2c_dev_set_name()
+
+Some places in the kernel allow users to map resources to a device
+using the device name (for example, gpiod_lookup_table). Currently
+this involves waiting for the i2c_client to have been registered so we
+can use dev_name(&client->dev). We want to add a function to allow users
+to refer to an i2c device by name before it has been instantiated, so
+create a macro for the name format that is accessible outside the i2c
+layer and use it in i2c_dev_set_name().
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/i2c/i2c-core-base.c | 4 ++--
+ include/linux/i2c.h         | 7 +++++++
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 63ebf722a424..547b8926cac8 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -811,12 +811,12 @@ static void i2c_dev_set_name(struct i2c_adapter *adap,
+ 	struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ 
+ 	if (info && info->dev_name) {
+-		dev_set_name(&client->dev, "i2c-%s", info->dev_name);
++		dev_set_name(&client->dev, I2C_DEV_NAME_FORMAT, info->dev_name);
+ 		return;
+ 	}
+ 
+ 	if (adev) {
+-		dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev));
++		dev_set_name(&client->dev, I2C_DEV_NAME_FORMAT, acpi_dev_name(adev));
+ 		return;
+ 	}
+ 
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 56622658b215..65acae61dc5c 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -39,6 +39,9 @@ enum i2c_slave_event;
+ typedef int (*i2c_slave_cb_t)(struct i2c_client *client,
+ 			      enum i2c_slave_event event, u8 *val);
+ 
++/* I2C Device Name Format - to maintain consistency outside the i2c layer */
++#define I2C_DEV_NAME_FORMAT		"i2c-%s"
++
+ /* I2C Frequency Modes */
+ #define I2C_MAX_STANDARD_MODE_FREQ	100000
+ #define I2C_MAX_FAST_MODE_FREQ		400000
+@@ -1011,6 +1014,10 @@ static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
+ {
+ 	return ERR_PTR(-ENODEV);
+ }
++static inline char *i2c_acpi_dev_name(struct acpi_device *adev)
++{
++	return NULL;
++}
+ static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
+ {
+ 	return NULL;
+-- 
+2.30.1
+
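+The macro lets other subsystems predict the name a client will get before it
+exists. A rough sketch of keying a gpiod_lookup_table with it; the ACPI
+instance name "INT33BE:00" and the helper name are examples, not taken from
+the patch:
+
+    #include <linux/gpio/machine.h>
+    #include <linux/i2c.h>
+    #include <linux/kernel.h>
+    #include <linux/slab.h>
+
+    /* Illustrative: register a GPIO mapping for an i2c client that has not
+     * been instantiated yet, using the name i2c_dev_set_name() will assign.
+     */
+    static int map_sensor_gpios(struct gpiod_lookup_table *table)
+    {
+            table->dev_id = kasprintf(GFP_KERNEL, I2C_DEV_NAME_FORMAT,
+                                      "INT33BE:00");  /* -> "i2c-INT33BE:00" */
+            if (!table->dev_id)
+                    return -ENOMEM;
+
+            gpiod_add_lookup_table(table);
+            return 0;
+    }
+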
+From c77e0ac04cb8e73291c520554153aee9bb3761ad Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Wed, 2 Dec 2020 16:41:42 +0000
+Subject: [PATCH] i2c: i2c-core-acpi: Add i2c_acpi_dev_name()
+
+We want to refer to an i2c device by name before it has been
+created by the kernel; add a function that constructs the name
+from the acpi device instead.
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/i2c/i2c-core-acpi.c | 16 ++++++++++++++++
+ include/linux/i2c.h         |  1 +
+ 2 files changed, 17 insertions(+)
+
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index aed579942436..89751415b69b 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -497,6 +497,22 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
+ }
+ EXPORT_SYMBOL_GPL(i2c_acpi_new_device);
+ 
++/**
++ * i2c_acpi_dev_name - Construct i2c device name for devs sourced from ACPI
++ * @adev:     ACPI device to construct the name for
++ *
++ * Constructs the name of an i2c device matching the format used by
++ * i2c_dev_set_name() to allow users to refer to an i2c device by name even
++ * before it has been instantiated.
++ *
++ * The caller is responsible for freeing the returned pointer.
++ */
++char *i2c_acpi_dev_name(struct acpi_device *adev)
++{
++	return kasprintf(GFP_KERNEL, I2C_DEV_NAME_FORMAT, acpi_dev_name(adev));
++}
++EXPORT_SYMBOL_GPL(i2c_acpi_dev_name);
++
+ #ifdef CONFIG_ACPI_I2C_OPREGION
+ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
+ 		u8 cmd, u8 *data, u8 data_len)
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 65acae61dc5c..b82aac05b17f 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -998,6 +998,7 @@ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ u32 i2c_acpi_find_bus_speed(struct device *dev);
+ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
+ 				       struct i2c_board_info *info);
++char *i2c_acpi_dev_name(struct acpi_device *adev);
+ struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle);
+ #else
+ static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+-- 
+2.30.1
+
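+The helper mirrors the pattern above but takes the acpi_device directly; the
+INT3472 driver later in this series uses it to name the sensor client for its
+GPIO lookup table. A small illustrative sketch of the ownership rule (the
+function name is made up):
+
+    #include <linux/acpi.h>
+    #include <linux/gpio/machine.h>
+    #include <linux/i2c.h>
+    #include <linux/slab.h>
+
+    /* Illustrative: name the future i2c client for @sensor and use it as the
+     * consumer of a GPIO lookup table. The string stays owned by the caller
+     * and must be kfree()d after the table has been removed again.
+     */
+    static char *bind_lookup_to_sensor(struct acpi_device *sensor,
+                                       struct gpiod_lookup_table *table)
+    {
+            char *name = i2c_acpi_dev_name(sensor);
+
+            if (!name)
+                    return NULL;
+
+            table->dev_id = name;
+            gpiod_add_lookup_table(table);
+
+            return name;
+    }
+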
+From 3697ae368de6eba0dcf4f776c752e5d4bbf5aa35 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Mon, 16 Nov 2020 00:16:56 +0000
+Subject: [PATCH] gpio: gpiolib-acpi: Export acpi_get_gpiod()
+
+I need to be able to translate GPIO resources in an acpi_device's _CRS
+into gpio_descs. Those are represented in _CRS as a pathname to a GPIO
+device plus the pin's index number: this function is perfect for that
+purpose.
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/gpio/gpiolib-acpi.c | 3 ++-
+ include/linux/acpi.h        | 5 +++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index e37a57d0a2f0..83f9f85cd0ab 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -111,7 +111,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
+  * controller does not have GPIO chip registered at the moment. This is to
+  * support probe deferral.
+  */
+-static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
++struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+ {
+ 	struct gpio_chip *chip;
+ 	acpi_handle handle;
+@@ -127,6 +127,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+ 
+ 	return gpiochip_get_desc(chip, pin);
+ }
++EXPORT_SYMBOL_GPL(acpi_get_gpiod);
+ 
+ static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
+ {
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 053bf05fb1f7..626d43b00c4f 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1073,6 +1073,7 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
+ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ 				struct acpi_resource_gpio **agpio);
+ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
++struct gpio_desc *acpi_get_gpiod(char *path, int pin);
+ #else
+ static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ 					      struct acpi_resource_gpio **agpio)
+@@ -1083,6 +1084,10 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+ {
+ 	return -ENXIO;
+ }
+static inline struct gpio_desc *acpi_get_gpiod(char *path, int pin)
++{
++	return NULL;
++}
+ #endif
+ 
+ /* Device properties */
+-- 
+2.30.1
+
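+The exported helper pairs naturally with a _CRS walk: a GpioIo resource
+carries exactly the controller path and pin index the function expects, which
+is how the INT3472 driver later in this series uses it. A minimal sketch, with
+error handling reduced to the bare minimum:
+
+    #include <linux/acpi.h>
+    #include <linux/gpio/consumer.h>
+
+    /* Illustrative: translate a GpioIo resource seen during a _CRS walk into
+     * a gpio_desc. @ares is assumed to already be ACPI_RESOURCE_TYPE_GPIO.
+     */
+    static struct gpio_desc *gpiod_from_crs_resource(struct acpi_resource *ares)
+    {
+            char *path = ares->data.gpio.resource_source.string_ptr;
+            u16 pin = ares->data.gpio.pin_table[0];
+
+            return acpi_get_gpiod(path, pin);       /* may return ERR_PTR() */
+    }
+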
+From 2d9e688f36d83ffdfebe60223bf3ec24be34b019 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 12 Dec 2020 23:56:59 +0000
+Subject: [PATCH] mfd: Remove tps68470 MFD driver
+
+This driver only covered one scenario in which ACPI devices with _HID
+INT3472 are found, and its functionality has been taken over by the
+intel-skl-int3472 module, so remove it.
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/acpi/pmic/Kconfig |  1 -
+ drivers/gpio/Kconfig      |  1 -
+ drivers/mfd/Kconfig       | 18 --------
+ drivers/mfd/Makefile      |  1 -
+ drivers/mfd/tps68470.c    | 97 ---------------------------------------
+ 5 files changed, 118 deletions(-)
+ delete mode 100644 drivers/mfd/tps68470.c
+
+diff --git a/drivers/acpi/pmic/Kconfig b/drivers/acpi/pmic/Kconfig
+index 56bbcb2ce61b..e27d8ef3a32c 100644
+--- a/drivers/acpi/pmic/Kconfig
++++ b/drivers/acpi/pmic/Kconfig
+@@ -52,7 +52,6 @@ endif	# PMIC_OPREGION
+ 
+ config TPS68470_PMIC_OPREGION
+ 	bool "ACPI operation region support for TPS68470 PMIC"
+-	depends on MFD_TPS68470
+ 	help
+ 	  This config adds ACPI operation region support for TI TPS68470 PMIC.
+ 	  TPS68470 device is an advanced power management unit that powers
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index fa225175e68d..27b43d9c5da6 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -1347,7 +1347,6 @@ config GPIO_TPS65912
+ 
+ config GPIO_TPS68470
+ 	bool "TPS68470 GPIO"
+-	depends on MFD_TPS68470
+ 	help
+ 	  Select this option to enable GPIO driver for the TPS68470
+ 	  chip family.
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index bdfce7b15621..9a1f648efde0 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -1520,24 +1520,6 @@ config MFD_TPS65217
+ 	  This driver can also be built as a module.  If so, the module
+ 	  will be called tps65217.
+ 
+-config MFD_TPS68470
+-	bool "TI TPS68470 Power Management / LED chips"
+-	depends on ACPI && PCI && I2C=y
+-	depends on I2C_DESIGNWARE_PLATFORM=y
+-	select MFD_CORE
+-	select REGMAP_I2C
+-	help
+-	  If you say yes here you get support for the TPS68470 series of
+-	  Power Management / LED chips.
+-
+-	  These include voltage regulators, LEDs and other features
+-	  that are often used in portable devices.
+-
+-	  This option is a bool as it provides an ACPI operation
+-	  region, which must be available before any of the devices
+-	  using this are probed. This option also configures the
+-	  designware-i2c driver to be built-in, for the same reason.
+-
+ config MFD_TI_LP873X
+ 	tristate "TI LP873X Power Management IC"
+ 	depends on I2C
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index 14fdb188af02..5994e812f479 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -105,7 +105,6 @@ obj-$(CONFIG_MFD_TPS65910)	+= tps65910.o
+ obj-$(CONFIG_MFD_TPS65912)	+= tps65912-core.o
+ obj-$(CONFIG_MFD_TPS65912_I2C)	+= tps65912-i2c.o
+ obj-$(CONFIG_MFD_TPS65912_SPI)  += tps65912-spi.o
+-obj-$(CONFIG_MFD_TPS68470)	+= tps68470.o
+ obj-$(CONFIG_MFD_TPS80031)	+= tps80031.o
+ obj-$(CONFIG_MENELAUS)		+= menelaus.o
+ 
+diff --git a/drivers/mfd/tps68470.c b/drivers/mfd/tps68470.c
+deleted file mode 100644
+index 4a4df4ffd18c..000000000000
+--- a/drivers/mfd/tps68470.c
++++ /dev/null
+@@ -1,97 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * TPS68470 chip Parent driver
+- *
+- * Copyright (C) 2017 Intel Corporation
+- *
+- * Authors:
+- *	Rajmohan Mani <rajmohan.mani@intel.com>
+- *	Tianshu Qiu <tian.shu.qiu@intel.com>
+- *	Jian Xu Zheng <jian.xu.zheng@intel.com>
+- *	Yuning Pu <yuning.pu@intel.com>
+- */
+-
+-#include <linux/acpi.h>
+-#include <linux/delay.h>
+-#include <linux/i2c.h>
+-#include <linux/init.h>
+-#include <linux/mfd/core.h>
+-#include <linux/mfd/tps68470.h>
+-#include <linux/regmap.h>
+-
+-static const struct mfd_cell tps68470s[] = {
+-	{ .name = "tps68470-gpio" },
+-	{ .name = "tps68470_pmic_opregion" },
+-};
+-
+-static const struct regmap_config tps68470_regmap_config = {
+-	.reg_bits = 8,
+-	.val_bits = 8,
+-	.max_register = TPS68470_REG_MAX,
+-};
+-
+-static int tps68470_chip_init(struct device *dev, struct regmap *regmap)
+-{
+-	unsigned int version;
+-	int ret;
+-
+-	/* Force software reset */
+-	ret = regmap_write(regmap, TPS68470_REG_RESET, TPS68470_REG_RESET_MASK);
+-	if (ret)
+-		return ret;
+-
+-	ret = regmap_read(regmap, TPS68470_REG_REVID, &version);
+-	if (ret) {
+-		dev_err(dev, "Failed to read revision register: %d\n", ret);
+-		return ret;
+-	}
+-
+-	dev_info(dev, "TPS68470 REVID: 0x%x\n", version);
+-
+-	return 0;
+-}
+-
+-static int tps68470_probe(struct i2c_client *client)
+-{
+-	struct device *dev = &client->dev;
+-	struct regmap *regmap;
+-	int ret;
+-
+-	regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
+-	if (IS_ERR(regmap)) {
+-		dev_err(dev, "devm_regmap_init_i2c Error %ld\n",
+-			PTR_ERR(regmap));
+-		return PTR_ERR(regmap);
+-	}
+-
+-	i2c_set_clientdata(client, regmap);
+-
+-	ret = tps68470_chip_init(dev, regmap);
+-	if (ret < 0) {
+-		dev_err(dev, "TPS68470 Init Error %d\n", ret);
+-		return ret;
+-	}
+-
+-	ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, tps68470s,
+-			      ARRAY_SIZE(tps68470s), NULL, 0, NULL);
+-	if (ret < 0) {
+-		dev_err(dev, "devm_mfd_add_devices failed: %d\n", ret);
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-static const struct acpi_device_id tps68470_acpi_ids[] = {
+-	{"INT3472"},
+-	{},
+-};
+-
+-static struct i2c_driver tps68470_driver = {
+-	.driver = {
+-		   .name = "tps68470",
+-		   .acpi_match_table = tps68470_acpi_ids,
+-	},
+-	.probe_new = tps68470_probe,
+-};
+-builtin_i2c_driver(tps68470_driver);
+-- 
+2.30.1
+
+From 35c50c04d90453ec373602263c14209453be8f59 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Fri, 15 Jan 2021 12:37:31 +0000
+Subject: [PATCH] platform: x86: Add intel_skl_int3472 driver
+
+ACPI devices with _HID INT3472 are currently matched to the tps68470
+driver; however, this does not cover all situations in which that _HID
+occurs. We've encountered three possibilities:
+
+1. On Chrome OS devices, an ACPI device with _HID INT3472 (representing
+a physical tps68470 device) that requires a GPIO and OpRegion driver
+2. On devices designed for Windows, an ACPI device with _HID INT3472
+(again representing a physical tps68470 device) which requires GPIO,
+Clock and Regulator drivers.
+3. On other devices designed for Windows, an ACPI device with _HID
+INT3472 which does NOT represent a physical tps68470, and is instead
+used as a dummy device to group some system GPIO lines which are meant
+to be consumed by the sensor that is dependent on this entry.
+
+This commit adds a new module registering a platform driver to deal
+with the 3rd scenario plus an i2c driver to deal with #1 and #2. It
+queries the CLDB buffer found against INT3472 entries to determine
+which is most appropriate.
+
+Suggested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ MAINTAINERS                                   |   5 +
+ drivers/platform/x86/Kconfig                  |  25 +
+ drivers/platform/x86/Makefile                 |   5 +
+ .../platform/x86/intel_skl_int3472_common.c   | 100 ++++
+ .../platform/x86/intel_skl_int3472_common.h   |  99 ++++
+ .../platform/x86/intel_skl_int3472_discrete.c | 489 ++++++++++++++++++
+ .../platform/x86/intel_skl_int3472_tps68470.c | 145 ++++++
+ 7 files changed, 868 insertions(+)
+ create mode 100644 drivers/platform/x86/intel_skl_int3472_common.c
+ create mode 100644 drivers/platform/x86/intel_skl_int3472_common.h
+ create mode 100644 drivers/platform/x86/intel_skl_int3472_discrete.c
+ create mode 100644 drivers/platform/x86/intel_skl_int3472_tps68470.c
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index fb99543648d3..f543702b8c65 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -9140,6 +9140,11 @@ S:	Maintained
+ F:	arch/x86/include/asm/intel_scu_ipc.h
+ F:	drivers/platform/x86/intel_scu_*
+ 
++INTEL SKL INT3472 ACPI DEVICE DRIVER
++M:	Daniel Scally <djrscally@gmail.com>
++S:	Maintained
++F:	drivers/platform/x86/intel_skl_int3472_*
++
+ INTEL SPEED SELECT TECHNOLOGY
+ M:	Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ L:	platform-driver-x86@vger.kernel.org
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 91e6176cdfbd..916b077df2d5 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -844,6 +844,31 @@ config INTEL_CHT_INT33FE
+ 	  device and CONFIG_TYPEC_FUSB302=m and CONFIG_BATTERY_MAX17042=m
+ 	  for Type-C device.
+ 
++config INTEL_SKL_INT3472
++	tristate "Intel SkyLake ACPI INT3472 Driver"
++	depends on X86 && ACPI
++	select REGMAP_I2C
++	help
++	  This driver adds support for the INT3472 ACPI devices found on some
++	  Intel SkyLake devices.
++
++	  There are 3 kinds of INT3472 ACPI device possible; two for devices
++	  designed for Windows (either with or without a physical tps68470
++	  PMIC) and one designed for Chrome OS. This driver handles all three
++	  situations by discovering information it needs to discern them at
++	  runtime.
++
++	  If your device was designed for Chrome OS, this driver will provide
++	  an ACPI operation region, which must be available before any of the
++	  devices using this are probed. For this reason, you should select Y
++	  if your device was designed for ChromeOS. This option also configures
++	  the designware-i2c driver to be built-in, for the same reason.
++
++	  Say Y or M here if you have a SkyLake device designed for use
++	  with Windows or ChromeOS. Say N here if you are not sure.
++
++	  The module will be named "intel-skl-int3472"
++
+ config INTEL_HID_EVENT
+ 	tristate "INTEL HID Event"
+ 	depends on ACPI
+diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
+index 581475f59819..3cefe67761af 100644
+--- a/drivers/platform/x86/Makefile
++++ b/drivers/platform/x86/Makefile
+@@ -86,6 +86,11 @@ obj-$(CONFIG_INTEL_HID_EVENT)		+= intel-hid.o
+ obj-$(CONFIG_INTEL_INT0002_VGPIO)	+= intel_int0002_vgpio.o
+ obj-$(CONFIG_INTEL_MENLOW)		+= intel_menlow.o
+ obj-$(CONFIG_INTEL_OAKTRAIL)		+= intel_oaktrail.o
++obj-$(CONFIG_INTEL_SKL_INT3472)		+= intel_skl_int3472.o
++intel_skl_int3472-objs			:= intel_skl_int3472_common.o \
++                                           intel_skl_int3472_discrete.o \
++                                           intel_skl_int3472_tps68470.o
++
+ obj-$(CONFIG_INTEL_VBTN)		+= intel-vbtn.o
+ 
+ # MSI
+diff --git a/drivers/platform/x86/intel_skl_int3472_common.c b/drivers/platform/x86/intel_skl_int3472_common.c
+new file mode 100644
+index 000000000000..08cb9d3c06aa
+--- /dev/null
++++ b/drivers/platform/x86/intel_skl_int3472_common.c
+@@ -0,0 +1,100 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Author: Dan Scally <djrscally@gmail.com> */
++
++#include <linux/acpi.h>
++#include <linux/i2c.h>
++#include <linux/platform_device.h>
++
++#include "intel_skl_int3472_common.h"
++
++int skl_int3472_get_cldb_buffer(struct acpi_device *adev,
++				struct int3472_cldb *cldb)
++{
++	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
++	acpi_handle handle = adev->handle;
++	union acpi_object *obj;
++	acpi_status status;
++	int ret = 0;
++
++	status = acpi_evaluate_object(handle, "CLDB", NULL, &buffer);
++	if (ACPI_FAILURE(status))
++		return -ENODEV;
++
++	obj = buffer.pointer;
++	if (!obj) {
++		dev_err(&adev->dev, "ACPI device has no CLDB object\n");
++		return -ENODEV;
++	}
++
++	if (obj->type != ACPI_TYPE_BUFFER) {
++		dev_err(&adev->dev, "CLDB object is not an ACPI buffer\n");
++		ret = -EINVAL;
++		goto out_free_buff;
++	}
++
++	if (obj->buffer.length > sizeof(*cldb)) {
++		dev_err(&adev->dev, "The CLDB buffer is too large\n");
++		ret = -EINVAL;
++		goto out_free_buff;
++	}
++
++	memcpy(cldb, obj->buffer.pointer, obj->buffer.length);
++
++out_free_buff:
++	kfree(buffer.pointer);
++	return ret;
++}
++
++static const struct acpi_device_id int3472_device_id[] = {
++	{ "INT3472", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, int3472_device_id);
++
++static struct platform_driver int3472_discrete = {
++	.driver = {
++		.name = "int3472-discrete",
++		.acpi_match_table = int3472_device_id,
++	},
++	.probe = skl_int3472_discrete_probe,
++	.remove = skl_int3472_discrete_remove,
++};
++
++static struct i2c_driver int3472_tps68470 = {
++	.driver = {
++		.name = "int3472-tps68470",
++		.acpi_match_table = int3472_device_id,
++	},
++	.probe_new = skl_int3472_tps68470_probe,
++};
++
++static int skl_int3472_init(void)
++{
++	int ret = 0;
++
++	ret = platform_driver_register(&int3472_discrete);
++	if (ret)
++		return ret;
++
++	ret = i2c_register_driver(THIS_MODULE, &int3472_tps68470);
++	if (ret)
++		goto err_unregister_plat_drv;
++
++	return 0;
++
++err_unregister_plat_drv:
++	platform_driver_unregister(&int3472_discrete);
++	return ret;
++}
++module_init(skl_int3472_init);
++
++static void skl_int3472_exit(void)
++{
++	platform_driver_unregister(&int3472_discrete);
++	i2c_del_driver(&int3472_tps68470);
++}
++module_exit(skl_int3472_exit);
++
++MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver");
++MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/platform/x86/intel_skl_int3472_common.h b/drivers/platform/x86/intel_skl_int3472_common.h
+new file mode 100644
+index 000000000000..4ac6bb2b223f
+--- /dev/null
++++ b/drivers/platform/x86/intel_skl_int3472_common.h
+@@ -0,0 +1,99 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Author: Dan Scally <djrscally@gmail.com> */
++#include <linux/regulator/machine.h>
++#include <linux/clk-provider.h>
++#include <linux/gpio/machine.h>
++#include <linux/regulator/driver.h>
++#include <linux/types.h>
++
++/* PMIC GPIO Types */
++#define INT3472_GPIO_TYPE_RESET					0x00
++#define INT3472_GPIO_TYPE_POWERDOWN				0x01
++#define INT3472_GPIO_TYPE_CLK_ENABLE				0x0c
++#define INT3472_GPIO_TYPE_POWER_ENABLE				0x0b
++#define INT3472_GPIO_TYPE_PRIVACY_LED				0x0d
++#define INT3472_PDEV_MAX_NAME_LEN				23
++#define INT3472_MAX_SENSOR_GPIOS				3
++#define GPIO_REGULATOR_NAME_LENGTH				27
++#define GPIO_REGULATOR_SUPPLY_NAME_LENGTH			9
++
++#define INT3472_REGULATOR(_NAME, _SUPPLY, _OPS)			\
++	(const struct regulator_desc) {				\
++		.name = _NAME,					\
++		.supply_name = _SUPPLY,				\
++		.id = 0,					\
++		.type = REGULATOR_VOLTAGE,			\
++		.ops = _OPS,					\
++		.owner = THIS_MODULE,				\
++	}
++
++#define INT3472_GPIO_FUNCTION_REMAP(_PIN, _FUNCTION)		\
++	(const struct int3472_gpio_function_remap) {		\
++		.documented = _PIN,					\
++		.actual = _FUNCTION				\
++	}
++
++#define to_int3472_clk(hw)					\
++	container_of(hw, struct int3472_gpio_clock, clk_hw)
++
++struct int3472_cldb {
++	u8 version;
++	/*
++	 * control logic type
++	 * 0: UNKNOWN
++	 * 1: DISCRETE(CRD-D)
++	 * 2: PMIC TPS68470
++	 * 3: PMIC uP6641
++	 */
++	u8 control_logic_type;
++	u8 control_logic_id;
++	u8 sensor_card_sku;
++	u8 reserved[28];
++};
++
++struct int3472_gpio_regulator {
++	char regulator_name[GPIO_REGULATOR_NAME_LENGTH];
++	char supply_name[GPIO_REGULATOR_SUPPLY_NAME_LENGTH];
++	struct gpio_desc *gpio;
++	struct regulator_dev *rdev;
++	struct regulator_desc rdesc;
++};
++
++struct int3472_gpio_clock {
++	struct clk *clk;
++	struct clk_hw clk_hw;
++	struct gpio_desc *gpio;
++};
++
++struct int3472_device {
++	struct acpi_device *adev;
++	struct platform_device *pdev;
++	struct acpi_device *sensor;
++	char *sensor_name;
++
++	unsigned int n_gpios; /* how many GPIOs have we seen */
++
++	struct int3472_gpio_regulator regulator;
++	struct int3472_gpio_clock clock;
++
++	unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
++	bool gpios_mapped;
++	struct gpiod_lookup_table gpios;
++};
++
++struct int3472_gpio_function_remap {
++	char *documented;
++	char *actual;
++};
++
++struct int3472_sensor_config {
++	char *sensor_module_name;
++	struct regulator_consumer_supply supply_map;
++	const struct int3472_gpio_function_remap *function_maps;
++};
++
++int skl_int3472_discrete_probe(struct platform_device *pdev);
++int skl_int3472_discrete_remove(struct platform_device *pdev);
++int skl_int3472_tps68470_probe(struct i2c_client *client);
++int skl_int3472_get_cldb_buffer(struct acpi_device *adev,
++				struct int3472_cldb *cldb);
+diff --git a/drivers/platform/x86/intel_skl_int3472_discrete.c b/drivers/platform/x86/intel_skl_int3472_discrete.c
+new file mode 100644
+index 000000000000..ea7e57f3e3f0
+--- /dev/null
++++ b/drivers/platform/x86/intel_skl_int3472_discrete.c
+@@ -0,0 +1,489 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Author: Dan Scally <djrscally@gmail.com> */
++
++#include <linux/acpi.h>
++#include <linux/clkdev.h>
++#include <linux/gpio/consumer.h>
++#include <linux/i2c.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/driver.h>
++
++#include "intel_skl_int3472_common.h"
++
++/* 79234640-9e10-4fea-a5c1-b5aa8b19756f */
++static const guid_t int3472_gpio_guid =
++	GUID_INIT(0x79234640, 0x9e10, 0x4fea,
++		  0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f);
++
++/* 822ace8f-2814-4174-a56b-5f029fe079ee */
++static const guid_t cio2_sensor_module_guid =
++	GUID_INIT(0x822ace8f, 0x2814, 0x4174,
++		  0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee);
++
++/*
++ * Here follows platform specific mapping information that we can pass to
++ * the functions mapping resources to the sensors. Where the sensors have
++ * a power enable pin defined in DSDT we need to provide a supply name so
++ * the sensor drivers can find the regulator. Optionally, we can provide a
++ * NULL terminated array of function name mappings to deal with any platform
++ * specific deviations from the documented behaviour of GPIOs.
++ *
++ * Map a GPIO function name to NULL to prevent the driver from mapping that
++ * GPIO at all.
++ */
++
++static const struct int3472_gpio_function_remap ov2680_gpio_function_remaps[] = {
++	INT3472_GPIO_FUNCTION_REMAP("reset", NULL),
++	INT3472_GPIO_FUNCTION_REMAP("powerdown", "reset"),
++	{ }
++};
++
++static struct int3472_sensor_config int3472_sensor_configs[] = {
++	/* Lenovo Miix 510-12ISK - OV2680, Front */
++	{ "GNDF140809R", { 0 }, ov2680_gpio_function_remaps},
++	/* Lenovo Miix 510-12ISK - OV5648, Rear */
++	{ "GEFF150023R", REGULATOR_SUPPLY("avdd", "i2c-OVTI5648:00"), NULL},
++	/* Surface Go 1&2 - OV5693, Front */
++	{ "YHCU", REGULATOR_SUPPLY("avdd", "i2c-INT33BE:00"), NULL},
++};
++
++/*
++ * The regulators have to have .ops to be valid, but the only ops we actually
++ * support are .enable and .disable which are handled via .ena_gpiod. Pass an
++ * empty struct to clear the check without lying about capabilities.
++ */
++static const struct regulator_ops int3472_gpio_regulator_ops = { 0 };
++
++static int skl_int3472_clk_enable(struct clk_hw *hw)
++{
++	struct int3472_gpio_clock *clk = to_int3472_clk(hw);
++
++	gpiod_set_value(clk->gpio, 1);
++
++	return 0;
++}
++
++static void skl_int3472_clk_disable(struct clk_hw *hw)
++{
++	struct int3472_gpio_clock *clk = to_int3472_clk(hw);
++
++	gpiod_set_value(clk->gpio, 0);
++}
++
++static int skl_int3472_clk_prepare(struct clk_hw *hw)
++{
++	/*
++	 * We're just turning a GPIO on to enable, so nothing to do here, but
++	 * we want to provide the op so prepare_enable() works.
++	 */
++	return 0;
++}
++
++static void skl_int3472_clk_unprepare(struct clk_hw *hw)
++{
++	/* Likewise, nothing to do here... */
++}
++
++static const struct clk_ops skl_int3472_clock_ops = {
++	.prepare = skl_int3472_clk_prepare,
++	.unprepare = skl_int3472_clk_unprepare,
++	.enable = skl_int3472_clk_enable,
++	.disable = skl_int3472_clk_disable,
++};
++
++static struct int3472_sensor_config *
++int3472_get_sensor_module_config(struct int3472_device *int3472)
++{
++	unsigned int i = ARRAY_SIZE(int3472_sensor_configs);
++	struct int3472_sensor_config *ret;
++	union acpi_object *obj;
++
++	obj = acpi_evaluate_dsm_typed(int3472->sensor->handle,
++				      &cio2_sensor_module_guid, 0x00,
++				      0x01, NULL, ACPI_TYPE_STRING);
++
++	if (!obj) {
++		dev_err(&int3472->pdev->dev,
++			"Failed to get sensor module string from _DSM\n");
++		return ERR_PTR(-ENODEV);
++	}
++
++	if (obj->string.type != ACPI_TYPE_STRING) {
++		dev_err(&int3472->pdev->dev,
++			"Sensor _DSM returned a non-string value\n");
++		ret = ERR_PTR(-EINVAL);
++		goto out_free_obj;
++	}
++
++	ret = ERR_PTR(-ENODEV);
++	while (i--) {
++		if (!strcmp(int3472_sensor_configs[i].sensor_module_name,
++			    obj->string.pointer)) {
++			ret = &int3472_sensor_configs[i];
++			goto out_free_obj;
++		}
++	}
++
++out_free_obj:
++	ACPI_FREE(obj);
++	return ret;
++}
++
++static int int3472_map_gpio_to_sensor(struct int3472_device *int3472,
++				      struct acpi_resource *ares,
++				      char *func, u32 polarity)
++{
++	char *path = ares->data.gpio.resource_source.string_ptr;
++	struct int3472_sensor_config *sensor_config;
++	struct gpiod_lookup table_entry;
++	struct acpi_device *adev;
++	acpi_handle handle;
++	acpi_status status;
++	int ret;
++
++	sensor_config = int3472_get_sensor_module_config(int3472);
++	if (!IS_ERR(sensor_config) && sensor_config->function_maps) {
++		unsigned int i = 0;
++
++		while (sensor_config->function_maps[i].documented) {
++			if (!strcmp(func, sensor_config->function_maps[i].documented)) {
++				func = sensor_config->function_maps[i].actual;
++
++				break;
++			}
++
++			i++;
++		}
++	}
++
++	if (!func)
++		return 0;
++
++	if (int3472->n_sensor_gpios >= INT3472_MAX_SENSOR_GPIOS) {
++		dev_warn(&int3472->pdev->dev, "Too many GPIOs mapped\n");
++		return -EINVAL;
++	}
++
++	status = acpi_get_handle(NULL, path, &handle);
++	if (ACPI_FAILURE(status))
++		return -EINVAL;
++
++	ret = acpi_bus_get_device(handle, &adev);
++	if (ret)
++		return -ENODEV;
++
++	table_entry = (struct gpiod_lookup)GPIO_LOOKUP_IDX(acpi_dev_name(adev),
++							   ares->data.gpio.pin_table[0],
++							   func, 0, polarity);
++
++	memcpy(&int3472->gpios.table[int3472->n_sensor_gpios], &table_entry,
++	       sizeof(table_entry));
++
++	int3472->n_sensor_gpios++;
++
++	return 0;
++}
++
++static int int3472_register_clock(struct int3472_device *int3472,
++				  struct acpi_resource *ares)
++{
++	char *path = ares->data.gpio.resource_source.string_ptr;
++	struct clk_init_data init = { };
++	int ret = 0;
++
++	init.name = kasprintf(GFP_KERNEL, "%s-clk", acpi_dev_name(int3472->adev));
++	init.ops = &skl_int3472_clock_ops;
++
++	int3472->clock.gpio = acpi_get_gpiod(path, ares->data.gpio.pin_table[0]);
++	if (IS_ERR(int3472->clock.gpio)) {
++		ret = PTR_ERR(int3472->clock.gpio);
++		goto out_free_init_name;
++	}
++
++	int3472->clock.clk_hw.init = &init;
++	int3472->clock.clk = clk_register(&int3472->adev->dev,
++					  &int3472->clock.clk_hw);
++	if (IS_ERR(int3472->clock.clk)) {
++		ret = PTR_ERR(int3472->clock.clk);
++		goto err_put_gpio;
++	}
++
++	ret = clk_register_clkdev(int3472->clock.clk, "xvclk", int3472->sensor_name);
++	if (ret)
++		goto err_unregister_clk;
++
++	goto out_free_init_name;
++
++err_unregister_clk:
++	clk_unregister(int3472->clock.clk);
++err_put_gpio:
++	gpiod_put(int3472->clock.gpio);
++out_free_init_name:
++	kfree(init.name);
++
++	return ret;
++}
++
++static int int3472_register_regulator(struct int3472_device *int3472,
++				      struct acpi_resource *ares)
++{
++	char *path = ares->data.gpio.resource_source.string_ptr;
++	struct int3472_sensor_config *sensor_config;
++	struct regulator_init_data init_data = { };
++	struct int3472_gpio_regulator *regulator = &int3472->regulator;
++	struct regulator_config cfg = { };
++	int ret;
++
++	sensor_config = int3472_get_sensor_module_config(int3472);
++	if (IS_ERR_OR_NULL(sensor_config)) {
++		dev_err(&int3472->pdev->dev, "No sensor module config\n");
++		return PTR_ERR(sensor_config);
++	}
++
++	if (!sensor_config->supply_map.supply) {
++		dev_err(&int3472->pdev->dev, "No supply name defined\n");
++		return -ENODEV;
++	}
++
++	init_data.supply_regulator = NULL;
++	init_data.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
++	init_data.num_consumer_supplies = 1;
++	init_data.consumer_supplies = &sensor_config->supply_map;
++
++	snprintf(int3472->regulator.regulator_name, GPIO_REGULATOR_NAME_LENGTH,
++		 "int3472-discrete-regulator");
++	snprintf(int3472->regulator.supply_name, GPIO_REGULATOR_SUPPLY_NAME_LENGTH,
++		 "supply-0");
++
++	int3472->regulator.rdesc = INT3472_REGULATOR(int3472->regulator.regulator_name,
++						     int3472->regulator.supply_name,
++						     &int3472_gpio_regulator_ops);
++
++	int3472->regulator.gpio = acpi_get_gpiod(path, ares->data.gpio.pin_table[0]);
++	if (IS_ERR(int3472->regulator.gpio)) {
++		ret = PTR_ERR(int3472->regulator.gpio);
++		goto err_free_regulator;
++	}
++
++	cfg.dev = &int3472->adev->dev;
++	cfg.init_data = &init_data;
++	cfg.ena_gpiod = int3472->regulator.gpio;
++
++	int3472->regulator.rdev = regulator_register(&int3472->regulator.rdesc, &cfg);
++	if (IS_ERR(int3472->regulator.rdev)) {
++		ret = PTR_ERR(int3472->regulator.rdev);
++		goto err_free_gpio;
++	}
++
++	return 0;
++
++err_free_gpio:
++	gpiod_put(regulator->gpio);
++err_free_regulator:
++	/* The regulator data is embedded in int3472; nothing to free here. */
++
++	return ret;
++}
++
++/**
++ * int3472_handle_gpio_resources: maps PMIC resources to consuming sensor
++ * @ares: A pointer to a &struct acpi_resource
++ * @data: A pointer to a &struct int3472_device
++ *
++ * This function handles GPIO resources that are against an INT3472
++ * ACPI device, by checking the value of the corresponding _DSM entry.
++ * This will return a 32bit int, where the lowest byte represents the
++ * function of the GPIO pin:
++ *
++ * 0x00 Reset
++ * 0x01 Power down
++ * 0x0b Power enable
++ * 0x0c Clock enable
++ * 0x0d Privacy LED
++ *
++ * There are some known platform specific quirks where that does not quite
++ * hold up; for example where a pin with type 0x01 (Power down) is mapped to
++ * a sensor pin that performs a reset function. These will be handled by the
++ * mapping sub-functions.
++ *
++ * GPIOs will either be mapped directly to the sensor device or else used
++ * to create clocks and regulators via the usual frameworks.
++ *
++ * Return:
++ * * 0		- When all resources found are handled properly.
++ * * EINVAL	- If the resource is not a GPIO IO resource (positive; parsing continues)
++ * * ENODEV	- If the resource has no corresponding _DSM entry (positive; parsing continues)
++ * * -Other	- Errors propagated from one of the sub-functions.
++ */
++static int int3472_handle_gpio_resources(struct acpi_resource *ares,
++					 void *data)
++{
++	struct int3472_device *int3472 = data;
++	union acpi_object *obj;
++	int ret = 0;
++
++	if (ares->type != ACPI_RESOURCE_TYPE_GPIO ||
++	    ares->data.gpio.connection_type != ACPI_RESOURCE_GPIO_TYPE_IO)
++		return EINVAL; /* Deliberately positive so parsing continues */
++
++	/*
++	 * n_gpios + 2 because the index of this _DSM function is 1-based and
++	 * the first function is just a count.
++	 */
++	obj = acpi_evaluate_dsm_typed(int3472->adev->handle,
++				      &int3472_gpio_guid, 0x00,
++				      int3472->n_gpios + 2,
++				      NULL, ACPI_TYPE_INTEGER);
++
++	if (!obj) {
++		dev_warn(&int3472->pdev->dev,
++			 "No _DSM entry for this GPIO pin\n");
++		return ENODEV;
++	}
++
++	switch (obj->integer.value & 0xff) {
++	case INT3472_GPIO_TYPE_RESET:
++		ret = int3472_map_gpio_to_sensor(int3472, ares, "reset",
++						 GPIO_ACTIVE_LOW);
++		if (ret)
++			dev_err(&int3472->pdev->dev,
++				"Failed to map reset pin to sensor\n");
++
++		break;
++	case INT3472_GPIO_TYPE_POWERDOWN:
++		ret = int3472_map_gpio_to_sensor(int3472, ares, "powerdown",
++						 GPIO_ACTIVE_LOW);
++		if (ret)
++			dev_err(&int3472->pdev->dev,
++				"Failed to map powerdown pin to sensor\n");
++
++		break;
++	case INT3472_GPIO_TYPE_CLK_ENABLE:
++		ret = int3472_register_clock(int3472, ares);
++		if (ret)
++			dev_err(&int3472->pdev->dev,
++				"Failed to map clock to sensor\n");
++
++		break;
++	case INT3472_GPIO_TYPE_POWER_ENABLE:
++		ret = int3472_register_regulator(int3472, ares);
++		if (ret) {
++			dev_err(&int3472->pdev->dev,
++				"Failed to map regulator to sensor\n");
++		}
++
++		break;
++	case INT3472_GPIO_TYPE_PRIVACY_LED:
++		ret = int3472_map_gpio_to_sensor(int3472, ares, "indicator-led",
++						 GPIO_ACTIVE_HIGH);
++		if (ret)
++			dev_err(&int3472->pdev->dev,
++				"Failed to map indicator led to sensor\n");
++
++		break;
++	default:
++		dev_warn(&int3472->pdev->dev,
++			 "GPIO type 0x%llx unknown; the sensor may not work\n",
++			 (obj->integer.value & 0xff));
++		ret = EINVAL;
++	}
++
++	int3472->n_gpios++;
++	ACPI_FREE(obj);
++
++	return ret;
++}
++
++static int int3472_parse_crs(struct int3472_device *int3472)
++{
++	struct list_head resource_list;
++	int ret = 0;
++
++	INIT_LIST_HEAD(&resource_list);
++
++	ret = acpi_dev_get_resources(int3472->adev, &resource_list,
++				     int3472_handle_gpio_resources, int3472);
++
++	if (!ret) {
++		gpiod_add_lookup_table(&int3472->gpios);
++		int3472->gpios_mapped = true;
++	}
++
++	acpi_dev_free_resource_list(&resource_list);
++
++	return ret;
++}
++
++int skl_int3472_discrete_probe(struct platform_device *pdev)
++{
++	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
++	struct int3472_device *int3472;
++	struct int3472_cldb cldb;
++	int ret = 0;
++
++	ret = skl_int3472_get_cldb_buffer(adev, &cldb);
++	if (ret || cldb.control_logic_type != 1)
++		return -EINVAL;
++
++	int3472 = kzalloc(sizeof(*int3472) +
++			 ((INT3472_MAX_SENSOR_GPIOS + 1) * sizeof(struct gpiod_lookup)),
++			 GFP_KERNEL);
++	if (!int3472)
++		return -ENOMEM;
++
++	int3472->adev = adev;
++	int3472->pdev = pdev;
++	platform_set_drvdata(pdev, int3472);
++
++	int3472->sensor = acpi_dev_get_next_dep_dev(adev, NULL);
++	if (!int3472->sensor) {
++		dev_err(&pdev->dev,
++			"This INT3472 entry seems to have no dependents.\n");
++		ret = -ENODEV;
++		goto err_free_int3472;
++	}
++	int3472->sensor_name = i2c_acpi_dev_name(int3472->sensor);
++	int3472->gpios.dev_id = int3472->sensor_name;
++
++	ret = int3472_parse_crs(int3472);
++	if (ret) {
++		skl_int3472_discrete_remove(pdev);
++		goto err_return_ret;
++	}
++
++	return 0;
++
++err_free_int3472:
++	kfree(int3472);
++err_return_ret:
++	return ret;
++}
++
++int skl_int3472_discrete_remove(struct platform_device *pdev)
++{
++	struct int3472_device *int3472;
++
++	int3472 = platform_get_drvdata(pdev);
++
++	if (int3472->gpios_mapped)
++		gpiod_remove_lookup_table(&int3472->gpios);
++
++	if (!IS_ERR_OR_NULL(int3472->regulator.rdev)) {
++		gpiod_put(int3472->regulator.gpio);
++		regulator_unregister(int3472->regulator.rdev);
++	}
++
++	if (!IS_ERR_OR_NULL(int3472->clock.clk)) {
++		gpiod_put(int3472->clock.gpio);
++		clk_unregister(int3472->clock.clk);
++	}
++
++	acpi_dev_put(int3472->sensor);
++
++	kfree(int3472->sensor_name);
++	kfree(int3472);
++
++	return 0;
++}
+diff --git a/drivers/platform/x86/intel_skl_int3472_tps68470.c b/drivers/platform/x86/intel_skl_int3472_tps68470.c
+new file mode 100644
+index 000000000000..3fe27ec0caff
+--- /dev/null
++++ b/drivers/platform/x86/intel_skl_int3472_tps68470.c
+@@ -0,0 +1,145 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Author: Dan Scally <djrscally@gmail.com> */
++
++#include <linux/i2c.h>
++#include <linux/mfd/tps68470.h>
++#include <linux/platform_device.h>
++#include <linux/regmap.h>
++
++#include "intel_skl_int3472_common.h"
++
++static const struct regmap_config tps68470_regmap_config = {
++	.reg_bits = 8,
++	.val_bits = 8,
++	.max_register = TPS68470_REG_MAX,
++};
++
++static int tps68470_chip_init(struct device *dev, struct regmap *regmap)
++{
++	unsigned int version;
++	int ret;
++
++	/* Force software reset */
++	ret = regmap_write(regmap, TPS68470_REG_RESET, TPS68470_REG_RESET_MASK);
++	if (ret)
++		return ret;
++
++	ret = regmap_read(regmap, TPS68470_REG_REVID, &version);
++	if (ret) {
++		dev_err(dev, "Failed to read revision register: %d\n", ret);
++		return ret;
++	}
++
++	dev_info(dev, "TPS68470 REVID: 0x%x\n", version);
++
++	return 0;
++}
++
++static struct platform_device *
++skl_int3472_register_pdev(const char *name, struct device *parent)
++{
++	struct platform_device *pdev;
++	int ret;
++
++	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
++	if (IS_ERR_OR_NULL(pdev))
++		return ERR_PTR(-ENOMEM);
++
++	pdev->dev.parent = parent;
++	pdev->driver_override = kstrndup(pdev->name, INT3472_PDEV_MAX_NAME_LEN,
++					 GFP_KERNEL);
++
++	ret = platform_device_add(pdev);
++	if (ret) {
++		platform_device_put(pdev);
++		return ERR_PTR(ret);
++	}
++
++	return pdev;
++}
++
++int skl_int3472_tps68470_probe(struct i2c_client *client)
++{
++	struct acpi_device *adev = ACPI_COMPANION(&client->dev);
++	struct platform_device *regulator_dev;
++	struct platform_device *opregion_dev;
++	struct platform_device *gpio_dev;
++	struct int3472_cldb cldb = { 0 };
++	struct platform_device *clk_dev;
++	bool cldb_present = true;
++	struct regmap *regmap;
++	int ret = 0;
++
++	regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
++	if (IS_ERR(regmap)) {
++		dev_err(&client->dev, "devm_regmap_init_i2c Error %ld\n",
++			PTR_ERR(regmap));
++		return PTR_ERR(regmap);
++	}
++
++	i2c_set_clientdata(client, regmap);
++
++	ret = tps68470_chip_init(&client->dev, regmap);
++	if (ret < 0) {
++		dev_err(&client->dev, "TPS68470 Init Error %d\n", ret);
++		return ret;
++	}
++
++	/*
++	 * Check CLDB buffer against the PMIC's adev. If present, then we check
++	 * the value of control_logic_type field and follow one of the following
++	 * scenarios:
++	 *
++	 *	1. No CLDB - likely ACPI tables designed for ChromeOS. We create
++	 *	platform devices for the GPIOs and OpRegion drivers.
++	 *
++	 *	2. CLDB, with control_logic_type = 2 - probably ACPI tables made
++	 *	for Windows 2-in-1 platforms. Register pdevs for GPIO, Clock and
++	 *	Regulator drivers to bind to.
++	 *
++	 *	3. Any other value in control_logic_type, we should never have
++	 *	gotten to this point; crash and burn.
++	 */
++	ret = skl_int3472_get_cldb_buffer(adev, &cldb);
++	if (!ret && cldb.control_logic_type != 2)
++		return -EINVAL;
++
++	if (ret)
++		cldb_present = false;
++
++	gpio_dev = skl_int3472_register_pdev("tps68470-gpio", &client->dev);
++	if (IS_ERR(gpio_dev))
++		return PTR_ERR(gpio_dev);
++
++	if (cldb_present) {
++		clk_dev = skl_int3472_register_pdev("tps68470-clk",
++						    &client->dev);
++		if (IS_ERR(clk_dev)) {
++			ret = PTR_ERR(clk_dev);
++			goto err_free_gpio;
++		}
++
++		regulator_dev = skl_int3472_register_pdev("tps68470-regulator",
++							  &client->dev);
++		if (IS_ERR(regulator_dev)) {
++			ret = PTR_ERR(regulator_dev);
++			goto err_free_clk;
++		}
++	} else {
++		opregion_dev = skl_int3472_register_pdev("tps68470_pmic_opregion",
++							 &client->dev);
++		if (IS_ERR(opregion_dev)) {
++			ret = PTR_ERR(opregion_dev);
++			goto err_free_gpio;
++		}
++	}
++
++	return 0;
++
++err_free_clk:
++	platform_device_put(clk_dev);
++err_free_gpio:
++	platform_device_put(gpio_dev);
++
++	return ret;
++}
+-- 
+2.30.1
+
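+Tying the two probe paths together: both start from the CLDB buffer (or its
+absence) to decide what a given INT3472 entry represents. A condensed,
+illustrative sketch of that decision; the enum and function names are made up:
+
+    #include <linux/acpi.h>
+
+    #include "intel_skl_int3472_common.h"
+
+    enum int3472_kind {
+            INT3472_KIND_CHROMEOS_PMIC,     /* no CLDB: ChromeOS-style tables */
+            INT3472_KIND_DISCRETE,          /* CLDB type 1: dummy GPIO grouping */
+            INT3472_KIND_WINDOWS_PMIC,      /* CLDB type 2: physical tps68470 */
+            INT3472_KIND_UNSUPPORTED,
+    };
+
+    /* Illustrative: classify an INT3472 ACPI device the way the two probe
+     * functions above do.
+     */
+    static enum int3472_kind skl_int3472_classify(struct acpi_device *adev)
+    {
+            struct int3472_cldb cldb;
+
+            if (skl_int3472_get_cldb_buffer(adev, &cldb))
+                    return INT3472_KIND_CHROMEOS_PMIC;
+
+            switch (cldb.control_logic_type) {
+            case 1:
+                    return INT3472_KIND_DISCRETE;
+            case 2:
+                    return INT3472_KIND_WINDOWS_PMIC;
+            default:
+                    return INT3472_KIND_UNSUPPORTED;
+            }
+    }
+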
+From 5c51db3de4fed9f76e34fb07369235cadc0667d9 Mon Sep 17 00:00:00 2001
+From: Jake Day <jake@ninebysix.com>
+Date: Fri, 25 Sep 2020 10:24:53 -0400
+Subject: [PATCH] media: i2c: Add support for the OV5693 image sensor
+
+The OV5693 is a 5 Mpx CMOS image sensor, connected via MIPI CSI-2
+in a one- or two-lane configuration.
+
+Signed-off-by: Jean-Michel Hautbois <jeanmichel.hautbois@gmail.com>
+Patchset: cameras
+---
+ drivers/media/i2c/Kconfig  |   11 +
+ drivers/media/i2c/Makefile |    1 +
+ drivers/media/i2c/ad5823.h |   63 ++
+ drivers/media/i2c/ov5693.c | 1788 ++++++++++++++++++++++++++++++++++++
+ drivers/media/i2c/ov5693.h | 1430 ++++++++++++++++++++++++++++
+ 5 files changed, 3293 insertions(+)
+ create mode 100644 drivers/media/i2c/ad5823.h
+ create mode 100644 drivers/media/i2c/ov5693.c
+ create mode 100644 drivers/media/i2c/ov5693.h
+
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 2b9d81e4794a..3099cb91ef21 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -972,6 +972,17 @@ config VIDEO_OV5675
+ 	  To compile this driver as a module, choose M here: the
+ 	  module will be called ov5675.
+ 
++config VIDEO_OV5693
++	tristate "OmniVision OV5693 sensor support"
++	depends on I2C && VIDEO_V4L2
++	select V4L2_FWNODE
++	help
++	  This is a Video4Linux2 sensor driver for the OmniVision
++	  OV5693 camera.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called ov5693.
++
+ config VIDEO_OV5695
+ 	tristate "OmniVision OV5695 sensor support"
+ 	depends on I2C && VIDEO_V4L2
+diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
+index a3149dce21bb..cac649668a4e 100644
+--- a/drivers/media/i2c/Makefile
++++ b/drivers/media/i2c/Makefile
+@@ -74,6 +74,7 @@ obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
+ obj-$(CONFIG_VIDEO_OV5647) += ov5647.o
+ obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
+ obj-$(CONFIG_VIDEO_OV5675) += ov5675.o
++obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
+ obj-$(CONFIG_VIDEO_OV5695) += ov5695.o
+ obj-$(CONFIG_VIDEO_OV6650) += ov6650.o
+ obj-$(CONFIG_VIDEO_OV7251) += ov7251.o
+diff --git a/drivers/media/i2c/ad5823.h b/drivers/media/i2c/ad5823.h
+new file mode 100644
+index 000000000000..f1362cd69f6e
+--- /dev/null
++++ b/drivers/media/i2c/ad5823.h
+@@ -0,0 +1,63 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Support for AD5823 VCM.
++ *
++ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ *
++ */
++
++#ifndef __AD5823_H__
++#define __AD5823_H__
++
++#include <linux/types.h>
++
++#define AD5823_VCM_ADDR	0x0c
++
++#define AD5823_REG_RESET		0x01
++#define AD5823_REG_MODE			0x02
++#define AD5823_REG_VCM_MOVE_TIME	0x03
++#define AD5823_REG_VCM_CODE_MSB		0x04
++#define AD5823_REG_VCM_CODE_LSB		0x05
++#define AD5823_REG_VCM_THRESHOLD_MSB	0x06
++#define AD5823_REG_VCM_THRESHOLD_LSB	0x07
++
++#define AD5823_REG_LENGTH		0x1
++
++#define AD5823_RING_CTRL_ENABLE		0x04
++#define AD5823_RING_CTRL_DISABLE	0x00
++
++#define AD5823_RESONANCE_PERIOD		100000
++#define AD5823_RESONANCE_COEF		512
++#define AD5823_HIGH_FREQ_RANGE		0x80
++
++#define VCM_CODE_MSB_MASK		0xfc
++#define AD5823_INIT_FOCUS_POS           350
++
++enum ad5823_tok_type {
++	AD5823_8BIT  = 0x1,
++	AD5823_16BIT = 0x2,
++};
++
++enum ad5823_vcm_mode {
++	AD5823_ARC_RES0 = 0x0,	/* Actuator response control RES1 */
++	AD5823_ARC_RES1 = 0x1,	/* Actuator response control RES0.5 */
++	AD5823_ARC_RES2 = 0x2,	/* Actuator response control RES2 */
++	AD5823_ESRC = 0x3,	/* Enhanced slew rate control */
++	AD5823_DIRECT = 0x4,	/* Direct control */
++};
++
++#define AD5823_INVALID_CONFIG	0xffffffff
++#define AD5823_MAX_FOCUS_POS	1023
++#define DELAY_PER_STEP_NS	1000000
++#define DELAY_MAX_PER_STEP_NS	(1000000 * 1023)
++#endif
+diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
+new file mode 100644
+index 000000000000..32485e4ed42b
+--- /dev/null
++++ b/drivers/media/i2c/ov5693.c
+@@ -0,0 +1,1788 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Support for OmniVision OV5693 1080p HD camera sensor.
++ *
++ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ *
++ */
++
++#include <linux/clk.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/moduleparam.h>
++#include <media/v4l2-device.h>
++#include <linux/io.h>
++#include <linux/acpi.h>
++#include <linux/regulator/consumer.h>
++
++#include "ov5693.h"
++#include "ad5823.h"
++
++#define __cci_delay(t) \
++	do { \
++		if ((t) < 10) { \
++			usleep_range((t) * 1000, ((t) + 1) * 1000); \
++		} else { \
++			msleep((t)); \
++		} \
++	} while (0)
++
++/* Value 30ms reached through experimentation on byt ecs.
++ * The DS specifies a much lower value but when using a smaller value
++ * the I2C bus sometimes locks up permanently when starting the camera.
++ * This issue could not be reproduced on cht, so the delay can be
++ * lowered via the module parameter on such systems.
++ */
++static uint up_delay = 30;
++module_param(up_delay, uint, 0644);
++MODULE_PARM_DESC(up_delay,
++		 "Delay prior to the first CCI transaction for ov5693");
++
++
++/* Exposure/gain */
++
++#define OV5693_EXPOSURE_CTRL_HH_REG		0x3500
++#define OV5693_EXPOSURE_CTRL_HH(v)		(((v) & GENMASK(18, 16)) >> 16)
++#define OV5693_EXPOSURE_CTRL_H_REG		0x3501
++#define OV5693_EXPOSURE_CTRL_H(v)		(((v) & GENMASK(15, 8)) >> 8)
++#define OV5693_EXPOSURE_CTRL_L_REG		0x3502
++#define OV5693_EXPOSURE_CTRL_L(v)		((v) & GENMASK(7, 0))
++#define OV5693_EXPOSURE_GAIN_MANUAL_REG		0x3509
++
++#define OV5693_GAIN_CTRL_H_REG			0x3504
++#define OV5693_GAIN_CTRL_H(v)			(((v) & GENMASK(9, 8)) >> 8)
++#define OV5693_GAIN_CTRL_L_REG			0x3505
++#define OV5693_GAIN_CTRL_L(v)			((v) & GENMASK(7, 0))
++
++#define OV5693_FORMAT1_REG			0x3820
++#define OV5693_FORMAT1_FLIP_VERT_ISP_EN		BIT(2)
++#define OV5693_FORMAT1_FLIP_VERT_SENSOR_EN	BIT(1)
++#define OV5693_FORMAT2_REG			0x3821
++#define OV5693_FORMAT2_HSYNC_EN			BIT(6)
++#define OV5693_FORMAT2_FST_VBIN_EN		BIT(5)
++#define OV5693_FORMAT2_FST_HBIN_EN		BIT(4)
++#define OV5693_FORMAT2_ISP_HORZ_VAR2_EN		BIT(3)
++#define OV5693_FORMAT2_FLIP_HORZ_ISP_EN		BIT(2)
++#define OV5693_FORMAT2_FLIP_HORZ_SENSOR_EN	BIT(1)
++#define OV5693_FORMAT2_SYNC_HBIN_EN		BIT(0)
++
++/* ISP */
++
++#define OV5693_ISP_CTRL0_REG			0x5000
++#define OV5693_ISP_CTRL0_LENC_EN		BIT(7)
++#define OV5693_ISP_CTRL0_WHITE_BALANCE_EN	BIT(4)
++#define OV5693_ISP_CTRL0_DPC_BLACK_EN		BIT(2)
++#define OV5693_ISP_CTRL0_DPC_WHITE_EN		BIT(1)
++#define OV5693_ISP_CTRL1_REG			0x5001
++#define OV5693_ISP_CTRL1_BLC_EN			BIT(0)
++
++/* native and active pixel array size. */
++#define OV5693_NATIVE_WIDTH		2688U
++#define OV5693_NATIVE_HEIGHT		1984U
++#define OV5693_PIXEL_ARRAY_LEFT		48U
++#define OV5693_PIXEL_ARRAY_TOP		20U
++#define OV5693_PIXEL_ARRAY_WIDTH	2592U
++#define OV5693_PIXEL_ARRAY_HEIGHT	1944U
++
++#define	OV5693_PPL_DEFAULT		2800
++
++static int vcm_ad_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
++{
++	int err;
++	struct i2c_msg msg;
++	u8 buf[2];
++
++	buf[0] = reg;
++	buf[1] = val;
++
++	msg.addr = VCM_ADDR;
++	msg.flags = 0;
++	msg.len = 2;
++	msg.buf = &buf[0];
++
++	err = i2c_transfer(client->adapter, &msg, 1);
++	if (err != 1) {
++		dev_err(&client->dev, "%s: vcm i2c fail, err code = %d\n",
++			__func__, err);
++		return -EIO;
++	}
++	return 0;
++}
++
++static int ad5823_i2c_write(struct i2c_client *client, u8 reg, u8 val)
++{
++	struct i2c_msg msg;
++	u8 buf[2];
++
++	buf[0] = reg;
++	buf[1] = val;
++	msg.addr = AD5823_VCM_ADDR;
++	msg.flags = 0;
++	msg.len = 0x02;
++	msg.buf = &buf[0];
++
++	if (i2c_transfer(client->adapter, &msg, 1) != 1)
++		return -EIO;
++	return 0;
++}
++
++static int ad5823_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
++{
++	struct i2c_msg msg[2];
++	u8 buf[2];
++
++	buf[0] = reg;
++	buf[1] = 0;
++
++	msg[0].addr = AD5823_VCM_ADDR;
++	msg[0].flags = 0;
++	msg[0].len = 0x01;
++	msg[0].buf = &buf[0];
++
++	msg[1].addr = 0x0c;
++	msg[1].flags = I2C_M_RD;
++	msg[1].len = 0x01;
++	msg[1].buf = &buf[1];
++	*val = 0;
++	if (i2c_transfer(client->adapter, msg, 2) != 2)
++		return -EIO;
++	*val = buf[1];
++	return 0;
++}
++
++static const u32 ov5693_embedded_effective_size = 28;
++
++/* i2c read/write stuff */
++static int ov5693_read_reg(struct i2c_client *client,
++			   u16 data_length, u16 reg, u16 *val)
++{
++	int err;
++	struct i2c_msg msg[2];
++	unsigned char data[6];
++
++	if (!client->adapter) {
++		dev_err(&client->dev, "%s error, no client->adapter\n",
++			__func__);
++		return -ENODEV;
++	}
++
++	if (data_length != OV5693_8BIT && data_length != OV5693_16BIT
++	    && data_length != OV5693_32BIT) {
++		dev_err(&client->dev, "%s error, invalid data length\n",
++			__func__);
++		return -EINVAL;
++	}
++
++	memset(msg, 0, sizeof(msg));
++
++	msg[0].addr = client->addr;
++	msg[0].flags = 0;
++	msg[0].len = I2C_MSG_LENGTH;
++	msg[0].buf = data;
++
++	/* high byte goes out first */
++	data[0] = (u8)(reg >> 8);
++	data[1] = (u8)(reg & 0xff);
++
++	msg[1].addr = client->addr;
++	msg[1].len = data_length;
++	msg[1].flags = I2C_M_RD;
++	msg[1].buf = data;
++
++	err = i2c_transfer(client->adapter, msg, 2);
++	if (err != 2) {
++		if (err >= 0)
++			err = -EIO;
++		dev_err(&client->dev,
++			"read from offset 0x%x error %d", reg, err);
++		return err;
++	}
++
++	*val = 0;
++	/* high byte comes first */
++	if (data_length == OV5693_8BIT)
++		*val = (u8)data[0];
++	else if (data_length == OV5693_16BIT)
++		*val = be16_to_cpu(*(__be16 *)&data[0]);
++	else
++		*val = be32_to_cpu(*(__be32 *)&data[0]);
++
++	return 0;
++}
++
++static int ov5693_i2c_write(struct i2c_client *client, u16 len, u8 *data)
++{
++	struct i2c_msg msg;
++	const int num_msg = 1;
++	int ret;
++
++	msg.addr = client->addr;
++	msg.flags = 0;
++	msg.len = len;
++	msg.buf = data;
++	ret = i2c_transfer(client->adapter, &msg, 1);
++
++	return ret == num_msg ? 0 : -EIO;
++}
++
++static int vcm_dw_i2c_write(struct i2c_client *client, u16 data)
++{
++	struct i2c_msg msg;
++	const int num_msg = 1;
++	int ret;
++	__be16 val;
++
++	val = cpu_to_be16(data);
++	msg.addr = VCM_ADDR;
++	msg.flags = 0;
++	msg.len = OV5693_16BIT;
++	msg.buf = (void *)&val;
++
++	ret = i2c_transfer(client->adapter, &msg, 1);
++
++	return ret == num_msg ? 0 : -EIO;
++}
++
++/*
++ * Theory: per datasheet, the two VCMs both allow for a 2-byte read.
++ * The DW9714 doesn't actually specify what this does (it has a
++ * two-byte write-only protocol, but specifies the read sequence as
++ * legal), but it returns the same data (zeroes) always, after an
++ * undocumented initial NAK.  The AD5823 has a one-byte address
++ * register to which all writes go, and subsequent reads will cycle
++ * through the 8 bytes of registers.  Notably, the default values (the
++ * device is always power-cycled affirmatively, so we can rely on
++ * these) in AD5823 are not pairwise repetitions of the same 16 bit
++ * word.  So all we have to do is sequentially read two bytes at a
++ * time and see if we detect a difference in any of the first four
++ * pairs.
++ */
++static int vcm_detect(struct i2c_client *client)
++{
++	int i, ret;
++	struct i2c_msg msg;
++	u16 data0 = 0, data;
++
++	for (i = 0; i < 4; i++) {
++		msg.addr = VCM_ADDR;
++		msg.flags = I2C_M_RD;
++		msg.len = sizeof(data);
++		msg.buf = (u8 *)&data;
++		ret = i2c_transfer(client->adapter, &msg, 1);
++
++		/*
++		 * DW9714 always fails the first read and returns
++		 * zeroes for subsequent ones
++		 */
++		if (i == 0 && ret == -EREMOTEIO) {
++			data0 = 0;
++			continue;
++		}
++
++		if (i == 0)
++			data0 = data;
++
++		if (data != data0)
++			return VCM_AD5823;
++	}
++	return ret == 1 ? VCM_DW9714 : ret;
++}
++
++static int ov5693_write_reg(struct i2c_client *client, u16 data_length,
++			    u16 reg, u16 val)
++{
++	int ret;
++	unsigned char data[4] = {0};
++	__be16 *wreg = (void *)data;
++	const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
++
++	if (data_length != OV5693_8BIT && data_length != OV5693_16BIT) {
++		dev_err(&client->dev,
++			"%s error, invalid data_length\n", __func__);
++		return -EINVAL;
++	}
++
++	/* high byte goes out first */
++	*wreg = cpu_to_be16(reg);
++
++	if (data_length == OV5693_8BIT) {
++		data[2] = (u8)(val);
++	} else {
++		/* OV5693_16BIT */
++		__be16 *wdata = (void *)&data[2];
++
++		*wdata = cpu_to_be16(val);
++	}
++
++	ret = ov5693_i2c_write(client, len, data);
++	if (ret)
++		dev_err(&client->dev,
++			"write error: wrote 0x%x to offset 0x%x error %d",
++			val, reg, ret);
++
++	return ret;
++}
++
++/*
++ * ov5693_write_reg_array - Initializes a list of OV5693 registers
++ * @client: i2c driver client structure
++ * @reglist: list of registers to be written
++ *
++ * This function initializes a list of registers. When consecutive addresses
++ * are found in a row on the list, this function creates a buffer and sends
++ * consecutive data in a single i2c_transfer().
++ *
++ * __ov5693_flush_reg_array, __ov5693_buf_reg_array() and
++ * __ov5693_write_reg_is_consecutive() are internal functions to
++ * ov5693_write_reg_array_fast() and should be not used anywhere else.
++ *
++ */
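++/*
++ * For example (illustrative register values): a reglist of {0x3800, 0x01},
++ * {0x3801, 0x02}, {0x3802, 0x03} is buffered and sent as a single transfer
++ * of "38 00 01 02 03", while a following entry for a non-consecutive
++ * address such as 0x3810 first flushes the buffer and starts a new one.
++ */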
++
++static int __ov5693_flush_reg_array(struct i2c_client *client,
++				    struct ov5693_write_ctrl *ctrl)
++{
++	u16 size;
++	__be16 *reg = (void *)&ctrl->buffer.addr;
++
++	if (ctrl->index == 0)
++		return 0;
++
++	size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
++
++	*reg = cpu_to_be16(ctrl->buffer.addr);
++	ctrl->index = 0;
++
++	return ov5693_i2c_write(client, size, (u8 *)reg);
++}
++
++static int __ov5693_buf_reg_array(struct i2c_client *client,
++				  struct ov5693_write_ctrl *ctrl,
++				  const struct ov5693_reg *next)
++{
++	int size;
++	__be16 *data16;
++
++	switch (next->type) {
++	case OV5693_8BIT:
++		size = 1;
++		ctrl->buffer.data[ctrl->index] = (u8)next->val;
++		break;
++	case OV5693_16BIT:
++		size = 2;
++
++		data16 = (void *)&ctrl->buffer.data[ctrl->index];
++		*data16 = cpu_to_be16((u16)next->val);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	/* When first item is added, we need to store its starting address */
++	if (ctrl->index == 0)
++		ctrl->buffer.addr = next->reg;
++
++	ctrl->index += size;
++
++	/*
++	 * If the buffer cannot guarantee free space for the next (up to
++	 * 16-bit) value, flush it now so the following item does not overflow.
++	 */
++	if (ctrl->index + sizeof(u16) >= OV5693_MAX_WRITE_BUF_SIZE)
++		return __ov5693_flush_reg_array(client, ctrl);
++
++	return 0;
++}
++
++static int __ov5693_write_reg_is_consecutive(struct i2c_client *client,
++	struct ov5693_write_ctrl *ctrl,
++	const struct ov5693_reg *next)
++{
++	if (ctrl->index == 0)
++		return 1;
++
++	return ctrl->buffer.addr + ctrl->index == next->reg;
++}
++
++static int ov5693_write_reg_array(struct i2c_client *client,
++				  const struct ov5693_reg *reglist)
++{
++	const struct ov5693_reg *next = reglist;
++	struct ov5693_write_ctrl ctrl;
++	int err;
++
++	ctrl.index = 0;
++	for (; next->type != OV5693_TOK_TERM; next++) {
++		switch (next->type & OV5693_TOK_MASK) {
++		case OV5693_TOK_DELAY:
++			err = __ov5693_flush_reg_array(client, &ctrl);
++			if (err)
++				return err;
++			msleep(next->val);
++			break;
++		default:
++			/*
++			 * If next address is not consecutive, data needs to be
++			 * flushed before proceeding.
++			 */
++			if (!__ov5693_write_reg_is_consecutive(client, &ctrl,
++							       next)) {
++				err = __ov5693_flush_reg_array(client, &ctrl);
++				if (err)
++					return err;
++			}
++			err = __ov5693_buf_reg_array(client, &ctrl, next);
++			if (err) {
++				dev_err(&client->dev,
++					"%s: write error, aborted\n",
++					__func__);
++				return err;
++			}
++			break;
++		}
++	}
++
++	return __ov5693_flush_reg_array(client, &ctrl);
++}
++
++static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size,
++				     u16 addr, u8 *buf)
++{
++	u16 index;
++	int ret;
++	u16 *pVal = NULL;
++
++	for (index = 0; index <= size; index++) {
++		pVal = (u16 *)(buf + index);
++		ret =
++		    ov5693_read_reg(client, OV5693_8BIT, addr + index,
++				    pVal);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 *buf)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++	int ret;
++	int i;
++	u8 *b = buf;
++
++	dev->otp_size = 0;
++	for (i = 1; i < OV5693_OTP_BANK_MAX; i++) {
++		/* set bank number and OTP read mode. */
++		ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_BANK_REG,
++				       (i | 0xc0));	//[7:6] 2'b11 [5:0] bank no
++		if (ret) {
++			dev_err(&client->dev, "failed to prepare OTP page\n");
++			return ret;
++		}
++		//dev_dbg(&client->dev, "write 0x%x->0x%x\n",OV5693_OTP_BANK_REG,(i|0xc0));
++
++		/*enable read */
++		ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_READ_REG,
++				       OV5693_OTP_MODE_READ);	// enable :1
++		if (ret) {
++			dev_err(&client->dev,
++				"failed to set OTP reading mode page");
++			return ret;
++		}
++		//dev_dbg(&client->dev, "write 0x%x->0x%x\n",
++		//	OV5693_OTP_READ_REG,OV5693_OTP_MODE_READ);
++
++		/* Reading the OTP data array */
++		ret = ov5693_read_otp_reg_array(client, OV5693_OTP_BANK_SIZE,
++						OV5693_OTP_START_ADDR,
++						b);
++		if (ret) {
++			dev_err(&client->dev, "failed to read OTP data\n");
++			return ret;
++		}
++
++		//dev_dbg(&client->dev,
++		//	"BANK[%2d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
++		//	i, *b, *(b+1), *(b+2), *(b+3), *(b+4), *(b+5), *(b+6), *(b+7),
++		//	*(b+8), *(b+9), *(b+10), *(b+11), *(b+12), *(b+13), *(b+14), *(b+15));
++
++		//Intel OTP map, try to read 320 bytes first.
++		if (i == 21) {
++			if ((*b) == 0) {
++				dev->otp_size = 320;
++				break;
++			}
++			/* (*b) != 0 */
++			b = buf;
++			continue;
++		} else if (i ==
++			   24) {		//if the first 320 bytes of data don't exist, try to read the next 32 bytes of data.
++			if ((*b) == 0) {
++				dev->otp_size = 32;
++				break;
++			}
++			/* (*b) != 0 */
++			b = buf;
++			continue;
++		} else if (i ==
++			   27) {		//if the previous 32 bytes of data don't exist, try to read the next 32 bytes of data again.
++			if ((*b) == 0) {
++				dev->otp_size = 32;
++				break;
++			}
++			/* (*b) != 0 */
++			dev->otp_size = 0;	// no OTP data.
++			break;
++		}
++
++		b = b + OV5693_OTP_BANK_SIZE;
++	}
++	return 0;
++}
++
++/*
++ * Read OTP data and store it into a devm-allocated buffer that is freed
++ * automatically when the device is released. The size of the data that was
++ * read is stored in dev->otp_size by __ov5693_otp_read().
++ */
++static void *ov5693_otp_read(struct v4l2_subdev *sd)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	u8 *buf;
++	int ret;
++
++	buf = devm_kzalloc(&client->dev, (OV5693_OTP_DATA_SIZE + 16), GFP_KERNEL);
++	if (!buf)
++		return ERR_PTR(-ENOMEM);
++
++	//otp valid after mipi on and sw stream on
++	ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x00);
++
++	ret = ov5693_write_reg(client, OV5693_8BIT,
++			       OV5693_SW_STREAM, OV5693_START_STREAMING);
++
++	ret = __ov5693_otp_read(sd, buf);
++
++	//mipi off and sw stream off after otp read
++	ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x0f);
++
++	ret = ov5693_write_reg(client, OV5693_8BIT,
++			       OV5693_SW_STREAM, OV5693_STOP_STREAMING);
++
++	/* Driver has failed to find valid data */
++	if (ret) {
++		dev_err(&client->dev, "sensor found no valid OTP data\n");
++		return ERR_PTR(ret);
++	}
++
++	return buf;
++}
++
++static int ov5693_update_bits(struct ov5693_device *sensor, u16 address,
++			      u16 mask, u16 bits)
++{
++	u16 value = 0;
++	int ret;
++
++	ret = ov5693_read_reg(sensor->i2c_client, OV5693_8BIT, address, &value);
++	if (ret)
++		return ret;
++
++	value &= ~mask;
++	value |= bits;
++
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_8BIT, address, value);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++/* Flip */
++
++static int ov5693_flip_vert_configure(struct ov5693_device *sensor, bool enable)
++{
++	u8 bits = OV5693_FORMAT1_FLIP_VERT_ISP_EN |
++		  OV5693_FORMAT1_FLIP_VERT_SENSOR_EN;
++	int ret;
++
++	ret = ov5693_update_bits(sensor, OV5693_FORMAT1_REG, bits,
++				 enable ? bits : 0);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++static int ov5693_flip_horz_configure(struct ov5693_device *sensor, bool enable)
++{
++	u8 bits = OV5693_FORMAT2_FLIP_HORZ_ISP_EN |
++		  OV5693_FORMAT2_FLIP_HORZ_SENSOR_EN;
++	int ret;
++
++	ret = ov5693_update_bits(sensor, OV5693_FORMAT2_REG, bits,
++				 enable ? bits : 0);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++/*
++ * This returns the exposure time being used. This should only be used
++ * for filling in EXIF data, not for actual image processing.
++ */
++static int ov5693_q_exposure(struct v4l2_subdev *sd, s32 *value)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	u16 reg_v, reg_v2;
++	int ret;
++
++	/* get exposure */
++	ret = ov5693_read_reg(client, OV5693_8BIT,
++			      OV5693_EXPOSURE_L,
++			      &reg_v);
++	if (ret)
++		goto err;
++
++	ret = ov5693_read_reg(client, OV5693_8BIT,
++			      OV5693_EXPOSURE_M,
++			      &reg_v2);
++	if (ret)
++		goto err;
++
++	reg_v += reg_v2 << 8;
++	ret = ov5693_read_reg(client, OV5693_8BIT,
++			      OV5693_EXPOSURE_H,
++			      &reg_v2);
++	if (ret)
++		goto err;
++
++	*value = reg_v + (((u32)reg_v2 << 16));
++err:
++	return ret;
++}
++
++static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	int ret = -EINVAL;
++	u8 vcm_code;
++
++	ret = ad5823_i2c_read(client, AD5823_REG_VCM_CODE_MSB, &vcm_code);
++	if (ret)
++		return ret;
++
++	/* set reg VCM_CODE_MSB Bit[1:0] */
++	vcm_code = (vcm_code & VCM_CODE_MSB_MASK) |
++		   ((val >> 8) & ~VCM_CODE_MSB_MASK);
++	ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, vcm_code);
++	if (ret)
++		return ret;
++
++	/* set reg VCM_CODE_LSB Bit[7:0] */
++	ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_LSB, (val & 0xff));
++	if (ret)
++		return ret;
++
++	/* set required vcm move time */
++	vcm_code = AD5823_RESONANCE_PERIOD / AD5823_RESONANCE_COEF
++		   - AD5823_HIGH_FREQ_RANGE;
++	ret = ad5823_i2c_write(client, AD5823_REG_VCM_MOVE_TIME, vcm_code);
++
++	return ret;
++}
++
++static int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value)
++{
++	value = min(value, AD5823_MAX_FOCUS_POS);
++	return ad5823_t_focus_vcm(sd, value);
++}
++
++static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value)
++{
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	int ret = 0;
++
++	dev_dbg(&client->dev, "%s: FOCUS_POS: 0x%x\n", __func__, value);
++	value = clamp(value, 0, OV5693_VCM_MAX_FOCUS_POS);
++	if (dev->vcm == VCM_DW9714) {
++		if (dev->vcm_update) {
++			ret = vcm_dw_i2c_write(client, VCM_PROTECTION_OFF);
++			if (ret)
++				return ret;
++			ret = vcm_dw_i2c_write(client, DIRECT_VCM);
++			if (ret)
++				return ret;
++			ret = vcm_dw_i2c_write(client, VCM_PROTECTION_ON);
++			if (ret)
++				return ret;
++			dev->vcm_update = false;
++		}
++		ret = vcm_dw_i2c_write(client,
++				       vcm_val(value, VCM_DEFAULT_S));
++	} else if (dev->vcm == VCM_AD5823) {
++		ad5823_t_focus_abs(sd, value);
++	}
++	if (ret == 0) {
++		dev->number_of_steps = value - dev->focus;
++		dev->focus = value;
++		dev->timestamp_t_focus_abs = ktime_get();
++	} else
++		dev_err(&client->dev,
++			"%s: i2c failed. ret %d\n", __func__, ret);
++
++	return ret;
++}
++
++static int ov5693_t_focus_rel(struct v4l2_subdev *sd, s32 value)
++{
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++
++	return ov5693_t_focus_abs(sd, dev->focus + value);
++}
++
++#define DELAY_PER_STEP_NS	1000000
++#define DELAY_MAX_PER_STEP_NS	(1000000 * 1023)
++
++/* Exposure */
++
++static int ov5693_get_exposure(struct ov5693_device *sensor)
++{
++	u16 reg_v, reg_v2;
++	int ret = 0;
++
++	/* get exposure */
++	ret = ov5693_read_reg(sensor->i2c_client, OV5693_8BIT,
++			      OV5693_EXPOSURE_L,
++			      &reg_v);
++	if (ret)
++		return ret;
++
++	ret = ov5693_read_reg(sensor->i2c_client, OV5693_8BIT,
++			      OV5693_EXPOSURE_M,
++			      &reg_v2);
++	if (ret)
++		return ret;
++
++	reg_v += reg_v2 << 8;
++	ret = ov5693_read_reg(sensor->i2c_client, OV5693_8BIT,
++			      OV5693_EXPOSURE_H,
++			      &reg_v2);
++	if (ret)
++		return ret;
++
++	dev_dbg(&sensor->i2c_client->dev, "exposure set to: %u\n", reg_v + ((u32)reg_v2 << 16));
++	return ret;
++}
++
++static int ov5693_exposure_configure(struct ov5693_device *sensor, u32 exposure)
++{
++	int ret;
++
++	ov5693_get_exposure(sensor);
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_8BIT,
++			OV5693_EXPOSURE_CTRL_HH_REG, OV5693_EXPOSURE_CTRL_HH(exposure));
++	if (ret)
++		return ret;
++
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_8BIT,
++			OV5693_EXPOSURE_CTRL_H_REG, OV5693_EXPOSURE_CTRL_H(exposure));
++	if (ret)
++		return ret;
++
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_8BIT,
++			OV5693_EXPOSURE_CTRL_L_REG, OV5693_EXPOSURE_CTRL_L(exposure));
++	if (ret)
++		return ret;
++	ov5693_get_exposure(sensor);
++
++	return 0;
++}
++
++/* Gain */
++
++static int ov5693_get_gain(struct ov5693_device *sensor, u32 *gain)
++{
++	u16 gain_l, gain_h;
++	int ret = 0;
++
++	ret = ov5693_read_reg(sensor->i2c_client, OV5693_8BIT,
++			      OV5693_GAIN_CTRL_L_REG,
++			      &gain_l);
++	if (ret)
++		return ret;
++
++	ret = ov5693_read_reg(sensor->i2c_client, OV5693_8BIT,
++			      OV5693_GAIN_CTRL_H_REG,
++			      &gain_h);
++	if (ret)
++		return ret;
++
++	*gain = (u32)(((gain_h & 0x03) << 8) |
++		(gain_l & 0xff));
++
++	return ret;
++}
++static int ov5693_gain_configure(struct ov5693_device *sensor, u32 gain)
++{
++	int ret;
++
++	/* A 1.0 gain is 0x400 */
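++	/* e.g. a control value of 1000 (1.0x) maps to 0x400, 2000 to 0x800. */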
++	gain = (gain * 1024)/1000;
++
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_16BIT,
++			OV5693_MWB_RED_GAIN_H, gain);
++	if (ret) {
++		dev_err(&sensor->i2c_client->dev, "%s: write %x error, aborted\n",
++			__func__, OV5693_MWB_RED_GAIN_H);
++		return ret;
++	}
++
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_16BIT,
++			OV5693_MWB_GREEN_GAIN_H, gain);
++	if (ret) {
++		dev_err(&sensor->i2c_client->dev, "%s: write %x error, aborted\n",
++			__func__, OV5693_MWB_RED_GAIN_H);
++		return ret;
++	}
++
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_16BIT,
++			OV5693_MWB_BLUE_GAIN_H, gain);
++	if (ret) {
++		dev_err(&sensor->i2c_client->dev, "%s: write %x error, aborted\n",
++			__func__, OV5693_MWB_RED_GAIN_H);
++		return ret;
++	}
++
++	return 0;
++}
++
++static int ov5693_analog_gain_configure(struct ov5693_device *sensor, u32 gain)
++{
++	int ret;
++
++	/* Analog gain */
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_8BIT,
++				OV5693_AGC_L, gain & 0xff);
++	if (ret) {
++		dev_err(&sensor->i2c_client->dev, "%s: write %x error, aborted\n",
++			__func__, OV5693_AGC_L);
++		return ret;
++	}
++
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_8BIT,
++				OV5693_AGC_H, (gain >> 8) & 0xff);
++	if (ret) {
++		dev_err(&sensor->i2c_client->dev, "%s: write %x error, aborted\n",
++			__func__, OV5693_AGC_H);
++		return ret;
++	}
++
++	return 0;
++}
++
++static int ov5693_s_ctrl(struct v4l2_ctrl *ctrl)
++{
++	struct ov5693_device *dev =
++	    container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
++	struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
++	int ret = 0;
++
++	switch (ctrl->id) {
++	case V4L2_CID_FOCUS_ABSOLUTE:
++		dev_dbg(&client->dev, "%s: CID_FOCUS_ABSOLUTE:%d.\n",
++			__func__, ctrl->val);
++		ret = ov5693_t_focus_abs(&dev->sd, ctrl->val);
++		break;
++	case V4L2_CID_FOCUS_RELATIVE:
++		dev_dbg(&client->dev, "%s: CID_FOCUS_RELATIVE:%d.\n",
++			__func__, ctrl->val);
++		ret = ov5693_t_focus_rel(&dev->sd, ctrl->val);
++		break;
++	case V4L2_CID_EXPOSURE:
++		dev_dbg(&client->dev, "%s: CID_EXPOSURE:%d.\n",
++			__func__, ctrl->val);
++		ret = ov5693_exposure_configure(dev, ctrl->val);
++		if (ret)
++			return ret;
++		break;
++	case V4L2_CID_ANALOGUE_GAIN:
++		dev_dbg(&client->dev, "%s: CID_ANALOGUE_GAIN:%d.\n",
++			__func__, ctrl->val);
++		ret = ov5693_analog_gain_configure(dev, ctrl->val);
++		if (ret)
++			return ret;
++		break;
++	case V4L2_CID_DIGITAL_GAIN:
++		dev_dbg(&client->dev, "%s: CID_DIGITAL_GAIN:%d.\n",
++			__func__, ctrl->val);
++		ret = ov5693_gain_configure(dev, ctrl->val);
++		if (ret)
++			return ret;
++		break;
++	case V4L2_CID_HFLIP:
++		return ov5693_flip_horz_configure(dev, !!ctrl->val);
++	case V4L2_CID_VFLIP:
++		return ov5693_flip_vert_configure(dev, !!ctrl->val);
++	default:
++		ret = -EINVAL;
++	}
++	return ret;
++}
++
++static int ov5693_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
++{
++	struct ov5693_device *dev =
++	    container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
++	int ret = 0;
++
++	switch (ctrl->id) {
++	case V4L2_CID_EXPOSURE_ABSOLUTE:
++		ret = ov5693_q_exposure(&dev->sd, &ctrl->val);
++		break;
++	case V4L2_CID_AUTOGAIN:
++		ret = ov5693_get_gain(dev, &ctrl->val);
++		break;
++	case V4L2_CID_FOCUS_ABSOLUTE:
++		/* NOTE: there was atomisp-specific function ov5693_q_focus_abs() */
++		break;
++	default:
++		ret = -EINVAL;
++	}
++
++	return ret;
++}
++
++static const struct v4l2_ctrl_ops ov5693_ctrl_ops = {
++	.s_ctrl = ov5693_s_ctrl,
++	.g_volatile_ctrl = ov5693_g_volatile_ctrl
++};
++
++static const struct v4l2_ctrl_config ov5693_controls[] = {
++	{
++		.ops = &ov5693_ctrl_ops,
++		.id = V4L2_CID_FOCUS_ABSOLUTE,
++		.type = V4L2_CTRL_TYPE_INTEGER,
++		.name = "focus move absolute",
++		.min = 0,
++		.max = OV5693_VCM_MAX_FOCUS_POS,
++		.step = 1,
++		.def = 0,
++		.flags = 0,
++	},
++	{
++		.ops = &ov5693_ctrl_ops,
++		.id = V4L2_CID_FOCUS_RELATIVE,
++		.type = V4L2_CTRL_TYPE_INTEGER,
++		.name = "focus move relative",
++		.min = OV5693_VCM_MAX_FOCUS_NEG,
++		.max = OV5693_VCM_MAX_FOCUS_POS,
++		.step = 1,
++		.def = 0,
++		.flags = 0,
++	},
++};
++
++static int ov5693_isp_configure(struct ov5693_device *sensor)
++{
++	int ret;
++
++	/* Enable lens correction. */
++	ret = ov5693_write_reg(sensor->i2c_client, OV5693_8BIT,
++			   OV5693_ISP_CTRL0_REG, 0x86);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++static int ov5693_init(struct v4l2_subdev *sd)
++{
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	int ret;
++
++	if (!dev->has_vcm)
++		return 0;
++
++	dev_info(&client->dev, "%s\n", __func__);
++	mutex_lock(&dev->input_lock);
++	dev->vcm_update = false;
++
++	if (dev->vcm == VCM_AD5823) {
++		ret = vcm_ad_i2c_wr8(client, 0x01, 0x01); /* vcm init test */
++		if (ret)
++			dev_err(&client->dev,
++				"vcm reset failed\n");
++		/*change the mode*/
++		ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB,
++				       AD5823_RING_CTRL_ENABLE);
++		if (ret)
++			dev_err(&client->dev,
++				"vcm enable ringing failed\n");
++		ret = ad5823_i2c_write(client, AD5823_REG_MODE,
++				       AD5823_ARC_RES1);
++		if (ret)
++			dev_err(&client->dev,
++				"vcm change mode failed\n");
++	}
++
++	/*change initial focus value for ad5823*/
++	if (dev->vcm == VCM_AD5823) {
++		dev->focus = AD5823_INIT_FOCUS_POS;
++		ov5693_t_focus_abs(sd, AD5823_INIT_FOCUS_POS);
++	} else {
++		dev->focus = 0;
++		ov5693_t_focus_abs(sd, 0);
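++/*
++ * The delay can be overridden at module load time, e.g. with
++ * "modprobe ov5693 up_delay=10" or "ov5693.up_delay=10" on the kernel
++ * command line.
++ */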
++	}
++
++	ov5693_isp_configure(dev);
++	mutex_unlock(&dev->input_lock);
++
++	return 0;
++}
++
++static int __power_up(struct v4l2_subdev *sd)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	struct ov5693_device *sensor = to_ov5693_sensor(sd);
++	int ret;
++
++	ret = clk_prepare_enable(sensor->clk);
++	if (ret) {
++		dev_err(&client->dev, "Error enabling clock\n");
++		return -EINVAL;
++	}
++
++	if (sensor->indicator_led)
++		gpiod_set_value_cansleep(sensor->indicator_led, 1);
++
++	ret = regulator_bulk_enable(OV5693_NUM_SUPPLIES,
++			sensor->supplies);
++	if (ret)
++		goto fail_power;
++
++	__cci_delay(up_delay);
++
++	return 0;
++
++fail_power:
++	if (sensor->indicator_led)
++		gpiod_set_value_cansleep(sensor->indicator_led, 0);
++	dev_err(&client->dev, "sensor power-up failed\n");
++
++	return ret;
++}
++
++static int power_down(struct v4l2_subdev *sd)
++{
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++
++	dev->focus = OV5693_INVALID_CONFIG;
++
++	clk_disable_unprepare(dev->clk);
++
++	if (dev->indicator_led)
++		gpiod_set_value_cansleep(dev->indicator_led, 0);
++	return regulator_bulk_disable(OV5693_NUM_SUPPLIES, dev->supplies);
++}
++
++static int power_up(struct v4l2_subdev *sd)
++{
++	static const int retry_count = 4;
++	int i, ret;
++
++	for (i = 0; i < retry_count; i++) {
++		ret = __power_up(sd);
++		if (!ret)
++			return 0;
++
++		power_down(sd);
++	}
++	return ret;
++}
++
++static int ov5693_s_power(struct v4l2_subdev *sd, int on)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	int ret;
++
++	dev_info(&client->dev, "%s: on %d\n", __func__, on);
++
++	if (on == 0)
++		return power_down(sd);
++
++	/* on == 1 */
++	ret = power_up(sd);
++	if (!ret) {
++		ret = ov5693_init(sd);
++		/* restore settings */
++		ov5693_res = ov5693_res_video;
++		N_RES = N_RES_VIDEO;
++	}
++
++	return ret;
++}
++
++/*
++ * distance - calculate the distance
++ * @res: resolution
++ * @w: width
++ * @h: height
++ *
++ * Get the gap between res_w/res_h and w/h.
++ * distance = (res_w/res_h - w/h) / (w/h) * 8192
++ * Modes with res->width/height smaller than w/h are not considered.
++ * Gaps with a ratio mismatch larger than 1/8 are not considered.
++ * Returns the value of the gap, or -1 on failure.
++ */
++#define LARGEST_ALLOWED_RATIO_MISMATCH 1024
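++/*
++ * The value 1024 corresponds to the 1/8 ratio mismatch mentioned above
++ * (8192 / 8). As an illustrative example, matching a 1920x1080 (16:9)
++ * request against a 2592x1944 (4:3) mode yields a distance of 2049 with the
++ * integer arithmetic below, so that combination is rejected.
++ */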
++static int distance(struct ov5693_resolution *res, u32 w, u32 h)
++{
++	int ratio;
++	int distance;
++
++	if (w == 0 || h == 0 ||
++	    res->width < w || res->height < h)
++		return -1;
++
++	ratio = res->width << 13;
++	ratio /= w;
++	ratio *= h;
++	ratio /= res->height;
++
++	distance = abs(ratio - 8192);
++
++	if (distance > LARGEST_ALLOWED_RATIO_MISMATCH)
++		return -1;
++
++	return distance;
++}
++
++/* Return the nearest higher resolution index.
++ * First try to find a resolution with an approximately matching aspect
++ * ratio; if multiple resolutions share the same aspect ratio, choose the
++ * smallest one.
++ */
++static int nearest_resolution_index(int w, int h)
++{
++	int i;
++	int idx = -1;
++	int dist;
++	int min_dist = INT_MAX;
++	int min_res_w = INT_MAX;
++	struct ov5693_resolution *tmp_res = NULL;
++
++	for (i = 0; i < N_RES; i++) {
++		tmp_res = &ov5693_res[i];
++		dist = distance(tmp_res, w, h);
++		if (dist == -1)
++			continue;
++		if (dist < min_dist) {
++			min_dist = dist;
++			idx = i;
++			min_res_w = ov5693_res[i].width;
++			continue;
++		}
++		if (dist == min_dist && ov5693_res[i].width < min_res_w)
++			idx = i;
++	}
++
++	return idx;
++}
++
++static int get_resolution_index(int w, int h)
++{
++	int i;
++
++	for (i = 0; i < N_RES; i++) {
++		if (w != ov5693_res[i].width)
++			continue;
++		if (h != ov5693_res[i].height)
++			continue;
++
++		return i;
++	}
++
++	return -1;
++}
++
++/* TODO: remove it. */
++static int startup(struct v4l2_subdev *sd)
++{
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	int ret = 0;
++
++	ret = ov5693_write_reg(client, OV5693_8BIT,
++			       OV5693_SW_RESET, 0x01);
++	if (ret) {
++		dev_err(&client->dev, "ov5693 reset err.\n");
++		return ret;
++	}
++
++	ret = ov5693_write_reg_array(client, ov5693_global_setting);
++	if (ret) {
++		dev_err(&client->dev, "ov5693 write register err.\n");
++		return ret;
++	}
++
++	ret = ov5693_write_reg_array(client, ov5693_res[dev->fmt_idx].regs);
++	if (ret) {
++		dev_err(&client->dev, "ov5693 write register err.\n");
++		return ret;
++	}
++
++	return ret;
++}
++
++static int ov5693_set_fmt(struct v4l2_subdev *sd,
++			  struct v4l2_subdev_pad_config *cfg,
++			  struct v4l2_subdev_format *format)
++{
++	struct v4l2_mbus_framefmt *fmt = &format->format;
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	int ret = 0;
++	int idx;
++	int cnt;
++
++	if (format->pad)
++		return -EINVAL;
++	if (!fmt)
++		return -EINVAL;
++
++	mutex_lock(&dev->input_lock);
++	idx = nearest_resolution_index(fmt->width, fmt->height);
++	if (idx == -1) {
++		/* return the largest resolution */
++		fmt->width = ov5693_res[N_RES - 1].width;
++		fmt->height = ov5693_res[N_RES - 1].height;
++	} else {
++		fmt->width = ov5693_res[idx].width;
++		fmt->height = ov5693_res[idx].height;
++	}
++
++	fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
++	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
++		cfg->try_fmt = *fmt;
++		ret = 0;
++		goto mutex_unlock;
++	}
++
++	dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
++	if (dev->fmt_idx == -1) {
++		dev_err(&client->dev, "get resolution fail\n");
++		ret = -EINVAL;
++		goto mutex_unlock;
++	}
++
++	for (cnt = 0; cnt < OV5693_POWER_UP_RETRY_NUM; cnt++) {
++		ret = power_up(sd);
++		if (ret) {
++			dev_err(&client->dev, "power up failed\n");
++			continue;
++		}
++
++		mutex_unlock(&dev->input_lock);
++		ov5693_init(sd);
++		mutex_lock(&dev->input_lock);
++		ret = startup(sd);
++		if (ret)
++			dev_err(&client->dev, " startup() FAILED!\n");
++		else
++			break;
++	}
++	if (cnt == OV5693_POWER_UP_RETRY_NUM) {
++		dev_err(&client->dev, "power up failed, gave up\n");
++		goto mutex_unlock;
++	}
++
++
++
++	/*
++	 * After the sensor settings are written to hardware, the stream is
++	 * sometimes started. This would cause an ISP timeout because the ISP
++	 * is not ready to receive data yet, so stop streaming here.
++	 */
++	ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
++			       OV5693_STOP_STREAMING);
++	if (ret)
++		dev_warn(&client->dev, "ov5693 stream off err\n");
++
++mutex_unlock:
++	mutex_unlock(&dev->input_lock);
++	return ret;
++}
++
++static const struct v4l2_rect *
++__ov5693_get_pad_crop(struct ov5693_device *dev, struct v4l2_subdev_pad_config *cfg,
++		      unsigned int pad, enum v4l2_subdev_format_whence which)
++{
++	switch (which) {
++	case V4L2_SUBDEV_FORMAT_TRY:
++		return v4l2_subdev_get_try_crop(&dev->sd, cfg, pad);
++	case V4L2_SUBDEV_FORMAT_ACTIVE:
++		return &dev->mode->crop;
++	}
++
++	return NULL;
++}
++static int ov5693_get_selection(struct v4l2_subdev *sd,
++				struct v4l2_subdev_pad_config *cfg,
++				struct v4l2_subdev_selection *sel)
++{
++	switch (sel->target) {
++	case V4L2_SEL_TGT_CROP: {
++		struct ov5693_device *dev = to_ov5693_sensor(sd);
++
++		mutex_lock(&dev->input_lock);
++		sel->r = *__ov5693_get_pad_crop(dev, cfg, sel->pad,
++						sel->which);
++		mutex_unlock(&dev->input_lock);
++
++		return 0;
++	}
++
++	case V4L2_SEL_TGT_NATIVE_SIZE:
++		sel->r.top = 0;
++		sel->r.left = 0;
++		sel->r.width = OV5693_NATIVE_WIDTH;
++		sel->r.height = OV5693_NATIVE_HEIGHT;
++
++		return 0;
++
++	case V4L2_SEL_TGT_CROP_DEFAULT:
++		sel->r.top = OV5693_PIXEL_ARRAY_TOP;
++		sel->r.left = OV5693_PIXEL_ARRAY_LEFT;
++		sel->r.width = OV5693_PIXEL_ARRAY_WIDTH;
++		sel->r.height = OV5693_PIXEL_ARRAY_HEIGHT;
++
++		return 0;
++	}
++
++	return -EINVAL;
++}
++
++static int ov5693_get_fmt(struct v4l2_subdev *sd,
++			  struct v4l2_subdev_pad_config *cfg,
++			  struct v4l2_subdev_format *format)
++{
++	struct v4l2_mbus_framefmt *fmt = &format->format;
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++
++	if (format->pad)
++		return -EINVAL;
++
++	if (!fmt)
++		return -EINVAL;
++
++	fmt->width = ov5693_res[dev->fmt_idx].width;
++	fmt->height = ov5693_res[dev->fmt_idx].height;
++	fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
++
++	return 0;
++}
++
++static int ov5693_detect(struct i2c_client *client)
++{
++	struct i2c_adapter *adapter = client->adapter;
++	u16 high, low;
++	int ret;
++	u16 id;
++	u8 revision;
++
++	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
++		return -ENODEV;
++
++	ret = ov5693_read_reg(client, OV5693_8BIT,
++			      OV5693_SC_CMMN_CHIP_ID_H, &high);
++	if (ret) {
++		dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
++		return -ENODEV;
++	}
++	ret = ov5693_read_reg(client, OV5693_8BIT,
++			      OV5693_SC_CMMN_CHIP_ID_L, &low);
++	id = ((((u16)high) << 8) | (u16)low);
++
++	if (id != OV5693_ID) {
++		dev_err(&client->dev, "sensor ID error 0x%x\n", id);
++		return -ENODEV;
++	}
++
++	ret = ov5693_read_reg(client, OV5693_8BIT,
++			      OV5693_SC_CMMN_SUB_ID, &high);
++	revision = (u8)high & 0x0f;
++
++	dev_info(&client->dev, "sensor_revision = 0x%x\n", revision);
++	dev_info(&client->dev, "sensor_address = 0x%02x\n", client->addr);
++	dev_info(&client->dev, "detect ov5693 success\n");
++	return 0;
++}
++
++static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
++{
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	int ret;
++
++	mutex_lock(&dev->input_lock);
++
++	/* power_on() here before streaming for regular PCs. */
++	if (enable) {
++		ret = power_up(sd);
++		if (ret) {
++			dev_err(&client->dev, "sensor power-up error\n");
++			goto out;
++		}
++	}
++
++	ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
++			       enable ? OV5693_START_STREAMING :
++			       OV5693_STOP_STREAMING);
++
++	/* power_off() here after streaming for regular PCs. */
++	if (!enable)
++		power_down(sd);
++
++out:
++	mutex_unlock(&dev->input_lock);
++
++	return ret;
++}
++
++static int ov5693_s_config(struct v4l2_subdev *sd, int irq)
++{
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++	struct i2c_client *client = v4l2_get_subdevdata(sd);
++	int ret = 0;
++
++	mutex_lock(&dev->input_lock);
++	ret = power_up(sd);
++	if (ret) {
++		dev_err(&client->dev, "ov5693 power-up err.\n");
++		goto fail_power_on;
++	}
++
++	if (!dev->vcm)
++		dev->vcm = vcm_detect(client);
++
++	/* config & detect sensor */
++	ret = ov5693_detect(client);
++	if (ret) {
++		dev_err(&client->dev, "ov5693_detect err s_config.\n");
++		goto fail_power_on;
++	}
++
++	dev->otp_data = ov5693_otp_read(sd);
++
++	/* turn off sensor, after probed */
++	ret = power_down(sd);
++	if (ret) {
++		dev_err(&client->dev, "ov5693 power-off err.\n");
++		goto fail_power_on;
++	}
++	mutex_unlock(&dev->input_lock);
++
++	return ret;
++
++fail_power_on:
++	power_down(sd);
++	dev_err(&client->dev, "sensor power-gating failed\n");
++	mutex_unlock(&dev->input_lock);
++	return ret;
++}
++
++static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
++				   struct v4l2_subdev_frame_interval *interval)
++{
++	struct ov5693_device *dev = to_ov5693_sensor(sd);
++
++	interval->interval.numerator = 1;
++	interval->interval.denominator = ov5693_res[dev->fmt_idx].fps;
++
++	return 0;
++}
++
++static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
++				 struct v4l2_subdev_pad_config *cfg,
++				 struct v4l2_subdev_mbus_code_enum *code)
++{
++	if (code->index >= MAX_FMTS)
++		return -EINVAL;
++
++	code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
++	return 0;
++}
++
++static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
++				  struct v4l2_subdev_pad_config *cfg,
++				  struct v4l2_subdev_frame_size_enum *fse)
++{
++	int index = fse->index;
++
++	if (index >= N_RES)
++		return -EINVAL;
++
++	fse->min_width = ov5693_res[index].width;
++	fse->min_height = ov5693_res[index].height;
++	fse->max_width = ov5693_res[index].width;
++	fse->max_height = ov5693_res[index].height;
++
++	return 0;
++}
++
++static const struct v4l2_subdev_video_ops ov5693_video_ops = {
++	.s_stream = ov5693_s_stream,
++	.g_frame_interval = ov5693_g_frame_interval,
++};
++
++static const struct v4l2_subdev_core_ops ov5693_core_ops = {
++	.s_power = ov5693_s_power,
++};
++
++static const struct v4l2_subdev_pad_ops ov5693_pad_ops = {
++	.enum_mbus_code = ov5693_enum_mbus_code,
++	.enum_frame_size = ov5693_enum_frame_size,
++	.get_fmt = ov5693_get_fmt,
++	.set_fmt = ov5693_set_fmt,
++	.get_selection = ov5693_get_selection,
++};
++
++static const struct v4l2_subdev_ops ov5693_ops = {
++	.core = &ov5693_core_ops,
++	.video = &ov5693_video_ops,
++	.pad = &ov5693_pad_ops,
++};
++
++static int ov5693_remove(struct i2c_client *client)
++{
++	struct v4l2_subdev *sd = i2c_get_clientdata(client);
++	struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
++	unsigned int i = OV5693_NUM_SUPPLIES;
++
++	dev_info(&client->dev, "%s...\n", __func__);
++
++	gpiod_put(ov5693->reset);
++	gpiod_put(ov5693->indicator_led);
++	while (i--)
++		regulator_put(ov5693->supplies[i].consumer);
++
++	v4l2_async_unregister_subdev(sd);
++
++	media_entity_cleanup(&ov5693->sd.entity);
++	v4l2_ctrl_handler_free(&ov5693->ctrl_handler);
++	kfree(ov5693);
++
++	return 0;
++}
++
++static int ov5693_init_controls(struct ov5693_device *ov5693)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(&ov5693->sd);
++	const struct v4l2_ctrl_ops *ops = &ov5693_ctrl_ops;
++	struct v4l2_ctrl *ctrl;
++	unsigned int i;
++	int ret;
++	int hblank;
++
++	ret = v4l2_ctrl_handler_init(&ov5693->ctrl_handler,
++				     ARRAY_SIZE(ov5693_controls));
++	if (ret) {
++		ov5693_remove(client);
++		return ret;
++	}
++
++	for (i = 0; i < ARRAY_SIZE(ov5693_controls); i++)
++		v4l2_ctrl_new_custom(&ov5693->ctrl_handler,
++				     &ov5693_controls[i],
++				     NULL);
++
++	/* link freq */
++	ctrl = v4l2_ctrl_new_int_menu(&ov5693->ctrl_handler, NULL,
++				      V4L2_CID_LINK_FREQ,
++				      0, 0, link_freq_menu_items);
++	if (ctrl)
++		ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
++
++	/* pixel rate */
++	v4l2_ctrl_new_std(&ov5693->ctrl_handler, NULL, V4L2_CID_PIXEL_RATE,
++			  0, OV5693_PIXEL_RATE, 1, OV5693_PIXEL_RATE);
++
++	if (ov5693->ctrl_handler.error) {
++		ov5693_remove(client);
++		return ov5693->ctrl_handler.error;
++	}
++
++	/* Exposure */
++
++	v4l2_ctrl_new_std(&ov5693->ctrl_handler, ops, V4L2_CID_EXPOSURE, 16, 1048575, 16,
++			  512);
++
++	/* Gain */
++
++	v4l2_ctrl_new_std(&ov5693->ctrl_handler, ops, V4L2_CID_ANALOGUE_GAIN, 1, 1023, 1, 128);
++	v4l2_ctrl_new_std(&ov5693->ctrl_handler, ops, V4L2_CID_DIGITAL_GAIN, 1, 3999, 1, 1000);
++
++	/* Flip */
++
++	v4l2_ctrl_new_std(&ov5693->ctrl_handler, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
++	v4l2_ctrl_new_std(&ov5693->ctrl_handler, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
++
++	hblank = OV5693_PPL_DEFAULT - ov5693->mode->width;
++	ov5693->hblank = v4l2_ctrl_new_std(&ov5693->ctrl_handler, ops,
++					   V4L2_CID_HBLANK, hblank, hblank,
++					   1, hblank);
++	if (ov5693->hblank)
++		ov5693->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
++
++	/* Use same lock for controls as for everything else. */
++	ov5693->ctrl_handler.lock = &ov5693->input_lock;
++	ov5693->sd.ctrl_handler = &ov5693->ctrl_handler;
++
++	return 0;
++}
++
++static int ov5693_configure_gpios(struct ov5693_device *ov5693)
++{
++	ov5693->reset = gpiod_get_index(&ov5693->i2c_client->dev, "reset", 0,
++					GPIOD_OUT_HIGH);
++	if (IS_ERR(ov5693->reset)) {
++		dev_err(&ov5693->i2c_client->dev, "Couldn't find reset GPIO\n");
++		return -EINVAL;
++	}
++
++	ov5693->indicator_led = gpiod_get_index_optional(&ov5693->i2c_client->dev,
++							 "indicator-led", 0,
++							 GPIOD_OUT_HIGH);
++	if (IS_ERR(ov5693->indicator_led)) {
++		dev_err(&ov5693->i2c_client->dev, "Couldn't find indicator-led GPIO\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int ov5693_get_regulators(struct ov5693_device *ov5693)
++{
++	unsigned int i;
++
++	for (i = 0; i < OV5693_NUM_SUPPLIES; i++)
++		ov5693->supplies[i].supply = ov5693_supply_names[i];
++
++	return regulator_bulk_get(&ov5693->i2c_client->dev,
++				       OV5693_NUM_SUPPLIES,
++				       ov5693->supplies);
++}
++
++static int ov5693_probe(struct i2c_client *client)
++{
++	struct ov5693_device *ov5693;
++	int ret = 0;
++
++	dev_info(&client->dev, "%s() called", __func__);
++
++	ov5693 = kzalloc(sizeof(*ov5693), GFP_KERNEL);
++	if (!ov5693)
++		return -ENOMEM;
++
++	ov5693->i2c_client = client;
++
++	/* check if VCM device exists */
++	/* TODO: read from SSDB */
++	ov5693->has_vcm = false;
++
++	mutex_init(&ov5693->input_lock);
++
++	v4l2_i2c_subdev_init(&ov5693->sd, client, &ov5693_ops);
++
++	ov5693->clk = devm_clk_get(&client->dev, "xvclk");
++	if (IS_ERR(ov5693->clk)) {
++		dev_err(&client->dev, "Error getting clock\n");
++		return -EINVAL;
++	}
++
++	ret = ov5693_configure_gpios(ov5693);
++	if (ret)
++		goto out_free;
++
++	ret = ov5693_get_regulators(ov5693);
++	if (ret)
++		goto out_put_reset;
++
++	ret = ov5693_s_config(&ov5693->sd, client->irq);
++	if (ret)
++		goto out_put_reset;
++
++	ov5693->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
++	ov5693->pad.flags = MEDIA_PAD_FL_SOURCE;
++	ov5693->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
++	ov5693->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
++	ov5693->mode = &ov5693_res_video[N_RES_VIDEO-1];
++
++	ret = ov5693_init_controls(ov5693);
++	if (ret)
++		ov5693_remove(client);
++
++	ret = media_entity_pads_init(&ov5693->sd.entity, 1, &ov5693->pad);
++	if (ret)
++		ov5693_remove(client);
++
++	ret = v4l2_async_register_subdev_sensor_common(&ov5693->sd);
++	if (ret) {
++		dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret);
++		goto media_entity_cleanup;
++	}
++
++	return ret;
++
++media_entity_cleanup:
++	media_entity_cleanup(&ov5693->sd.entity);
++out_put_reset:
++	gpiod_put(ov5693->reset);
++out_free:
++	v4l2_device_unregister_subdev(&ov5693->sd);
++	kfree(ov5693);
++	return ret;
++}
++
++static const struct acpi_device_id ov5693_acpi_match[] = {
++	{"INT33BE"},
++	{},
++};
++MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
++
++static struct i2c_driver ov5693_driver = {
++	.driver = {
++		.name = "ov5693",
++		.acpi_match_table = ov5693_acpi_match,
++	},
++	.probe_new = ov5693_probe,
++	.remove = ov5693_remove,
++};
++module_i2c_driver(ov5693_driver);
++
++MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/i2c/ov5693.h b/drivers/media/i2c/ov5693.h
+new file mode 100644
+index 000000000000..9a508e1f3624
+--- /dev/null
++++ b/drivers/media/i2c/ov5693.h
+@@ -0,0 +1,1430 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Support for OmniVision OV5693 5M camera sensor.
++ *
++ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ *
++ */
++
++#ifndef __OV5693_H__
++#define __OV5693_H__
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/i2c.h>
++#include <linux/delay.h>
++#include <linux/videodev2.h>
++#include <linux/spinlock.h>
++#include <media/v4l2-subdev.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-ctrls.h>
++#include <linux/v4l2-mediabus.h>
++#include <media/media-entity.h>
++
++#define OV5693_HID "INT33BE"
++
++/*
++ * FIXME: non-preview resolutions are currently broken
++ */
++#define ENABLE_NON_PREVIEW	1
++
++#define OV5693_POWER_UP_RETRY_NUM 5
++
++/* Defines for register writes and register array processing */
++#define I2C_MSG_LENGTH		0x2
++#define I2C_RETRY_COUNT		5
++
++#define OV5693_FOCAL_LENGTH_NUM	334	/*3.34mm*/
++#define OV5693_FOCAL_LENGTH_DEM	100
++#define OV5693_F_NUMBER_DEFAULT_NUM	24
++#define OV5693_F_NUMBER_DEM	10
++
++#define MAX_FMTS		1
++
++/* sensor_mode_data read_mode adaptation */
++#define OV5693_READ_MODE_BINNING_ON	0x0400
++#define OV5693_READ_MODE_BINNING_OFF	0x00
++#define OV5693_INTEGRATION_TIME_MARGIN	8
++
++#define OV5693_MAX_EXPOSURE_VALUE	0xFFF1
++#define OV5693_MAX_GAIN_VALUE		0xFF
++
++/*
++ * focal length bits definition:
++ * bits 31-16: numerator, bits 15-0: denominator
++ */
++#define OV5693_FOCAL_LENGTH_DEFAULT 0x1B70064
++
++/*
++ * current f-number bits definition:
++ * bits 31-16: numerator, bits 15-0: denominator
++ */
++#define OV5693_F_NUMBER_DEFAULT 0x18000a
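++/* e.g. 0x18000a encodes 24/10, i.e. f/2.4. */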
++
++/*
++ * f-number range bits definition:
++ * bits 31-24: max f-number numerator
++ * bits 23-16: max f-number denominator
++ * bits 15-8: min f-number numerator
++ * bits 7-0: min f-number denominator
++ */
++#define OV5693_F_NUMBER_RANGE 0x180a180a
++#define OV5693_ID	0x5690
++
++#define OV5693_FINE_INTG_TIME_MIN 0
++#define OV5693_FINE_INTG_TIME_MAX_MARGIN 0
++#define OV5693_COARSE_INTG_TIME_MIN 1
++#define OV5693_COARSE_INTG_TIME_MAX_MARGIN 6
++
++#define OV5693_BIN_FACTOR_MAX 4
++/*
++ * OV5693 System control registers
++ */
++#define OV5693_SW_SLEEP				0x0100
++#define OV5693_SW_RESET				0x0103
++#define OV5693_SW_STREAM			0x0100
++
++#define OV5693_SC_CMMN_CHIP_ID_H		0x300A
++#define OV5693_SC_CMMN_CHIP_ID_L		0x300B
++#define OV5693_SC_CMMN_SCCB_ID			0x300C
++#define OV5693_SC_CMMN_SUB_ID			0x302A /* process, version*/
++/*Bit[7:4] Group control, Bit[3:0] Group ID*/
++#define OV5693_GROUP_ACCESS			0x3208
++/*
++*Bit[3:0] Bit[19:16] of exposure,
++*remaining 16 bits lies in Reg0x3501&Reg0x3502
++*/
++#define OV5693_EXPOSURE_H			0x3500
++#define OV5693_EXPOSURE_M			0x3501
++#define OV5693_EXPOSURE_L			0x3502
++/*Bit[1:0] means Bit[9:8] of gain*/
++#define OV5693_AGC_H				0x350A
++#define OV5693_AGC_L				0x350B /*Bit[7:0] of gain*/
++
++#define OV5693_HORIZONTAL_START_H		0x3800 /*Bit[11:8]*/
++#define OV5693_HORIZONTAL_START_L		0x3801 /*Bit[7:0]*/
++#define OV5693_VERTICAL_START_H			0x3802 /*Bit[11:8]*/
++#define OV5693_VERTICAL_START_L			0x3803 /*Bit[7:0]*/
++#define OV5693_HORIZONTAL_END_H			0x3804 /*Bit[11:8]*/
++#define OV5693_HORIZONTAL_END_L			0x3805 /*Bit[7:0]*/
++#define OV5693_VERTICAL_END_H			0x3806 /*Bit[11:8]*/
++#define OV5693_VERTICAL_END_L			0x3807 /*Bit[7:0]*/
++#define OV5693_HORIZONTAL_OUTPUT_SIZE_H		0x3808 /*Bit[3:0]*/
++#define OV5693_HORIZONTAL_OUTPUT_SIZE_L		0x3809 /*Bit[7:0]*/
++#define OV5693_VERTICAL_OUTPUT_SIZE_H		0x380a /*Bit[3:0]*/
++#define OV5693_VERTICAL_OUTPUT_SIZE_L		0x380b /*Bit[7:0]*/
++/*High 8-bit, and low 8-bit HTS address is 0x380d*/
++#define OV5693_TIMING_HTS_H			0x380C
++/*High 8-bit, and low 8-bit HTS address is 0x380d*/
++#define OV5693_TIMING_HTS_L			0x380D
++/*High 8-bit, and low 8-bit VTS address is 0x380f*/
++#define OV5693_TIMING_VTS_H			0x380e
++/*High 8-bit, and low 8-bit VTS address is 0x380f*/
++#define OV5693_TIMING_VTS_L			0x380f
++
++#define OV5693_MWB_RED_GAIN_H			0x3400
++#define OV5693_MWB_GREEN_GAIN_H			0x3402
++#define OV5693_MWB_BLUE_GAIN_H			0x3404
++#define OV5693_MWB_GAIN_MAX			0x0fff
++
++#define OV5693_START_STREAMING			0x01
++#define OV5693_STOP_STREAMING			0x00
++
++#define VCM_ADDR           0x0c
++#define VCM_CODE_MSB       0x04
++
++#define OV5693_INVALID_CONFIG	0xffffffff
++
++#define OV5693_VCM_SLEW_STEP			0x30F0
++#define OV5693_VCM_SLEW_STEP_MAX		0x7
++#define OV5693_VCM_SLEW_STEP_MASK		0x7
++#define OV5693_VCM_CODE				0x30F2
++#define OV5693_VCM_SLEW_TIME			0x30F4
++#define OV5693_VCM_SLEW_TIME_MAX		0xffff
++#define OV5693_VCM_ENABLE			0x8000
++
++#define OV5693_VCM_MAX_FOCUS_NEG       -1023
++#define OV5693_VCM_MAX_FOCUS_POS       1023
++
++#define DLC_ENABLE 1
++#define DLC_DISABLE 0
++#define VCM_PROTECTION_OFF     0xeca3
++#define VCM_PROTECTION_ON      0xdc51
++#define VCM_DEFAULT_S 0x0
++#define vcm_step_s(a) (u8)(a & 0xf)
++#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3)
++#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104)
++#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200)
++#define vcm_val(data, s) (u16)(data << 4 | s)
++#define DIRECT_VCM vcm_dlc_mclk(0, 0)
++
++/* Defines for OTP Data Registers */
++#define OV5693_FRAME_OFF_NUM		0x4202
++#define OV5693_OTP_BYTE_MAX		32	//change to 32 as needed by otpdata
++#define OV5693_OTP_SHORT_MAX		16
++#define OV5693_OTP_START_ADDR		0x3D00
++#define OV5693_OTP_END_ADDR		0x3D0F
++#define OV5693_OTP_DATA_SIZE		320
++#define OV5693_OTP_PROGRAM_REG		0x3D80
++#define OV5693_OTP_READ_REG		0x3D81	// 1:Enable 0:disable
++#define OV5693_OTP_BANK_REG		0x3D84	//otp bank and mode
++#define OV5693_OTP_READY_REG_DONE	1
++#define OV5693_OTP_BANK_MAX		28
++#define OV5693_OTP_BANK_SIZE		16	//16 bytes per bank
++#define OV5693_OTP_READ_ONETIME		16
++#define OV5693_OTP_MODE_READ		1
++
++/* link freq and pixel rate required for IPU3 */
++#define OV5693_LINK_FREQ_640MHZ		640000000
++/* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample
++ * To avoid integer overflow, dividing by bits_per_sample first.
++ */
++#define OV5693_PIXEL_RATE		((OV5693_LINK_FREQ_640MHZ / 10) * 2 * 2)
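++/*
++ * With a 640 MHz link, 2 lanes and 10 bits per sample this works out to
++ * (640000000 / 10) * 2 * 2 = 256000000 pixels per second.
++ */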
++static const s64 link_freq_menu_items[] = {
++	OV5693_LINK_FREQ_640MHZ
++};
++
++#define OV5693_NUM_SUPPLIES		2
++static const char * const ov5693_supply_names[] = {
++	"avdd",
++	"dovdd",
++};
++
++struct regval_list {
++	u16 reg_num;
++	u8 value;
++};
++
++struct ov5693_resolution {
++	u8 *desc;
++	const struct ov5693_reg *regs;
++	int res;
++	int width;
++	int height;
++	int fps;
++	int pix_clk_freq;
++	u16 pixels_per_line;
++	u16 lines_per_frame;
++	u8 bin_factor_x;
++	u8 bin_factor_y;
++	u8 bin_mode;
++	bool used;
++
++	/* Analog crop rectangle. */
++	struct v4l2_rect crop;
++};
++
++struct ov5693_format {
++	u8 *desc;
++	u32 pixelformat;
++	struct ov5693_reg *regs;
++};
++
++enum vcm_type {
++	VCM_UNKNOWN,
++	VCM_AD5823,
++	VCM_DW9714,
++};
++
++/*
++ * ov5693 device structure.
++ */
++struct ov5693_device {
++	struct i2c_client *i2c_client;
++	struct v4l2_subdev sd;
++	struct media_pad pad;
++	struct v4l2_mbus_framefmt format;
++	struct mutex input_lock;
++	struct v4l2_ctrl_handler ctrl_handler;
++
++	struct gpio_desc *reset;
++	struct gpio_desc *indicator_led;
++	struct regulator_bulk_data supplies[OV5693_NUM_SUPPLIES];
++	struct clk *clk;
++
++	/* Current mode */
++	const struct ov5693_resolution *mode;
++
++	struct camera_sensor_platform_data *platform_data;
++	ktime_t timestamp_t_focus_abs;
++	int vt_pix_clk_freq_mhz;
++	int fmt_idx;
++	int run_mode;
++	int otp_size;
++	u8 *otp_data;
++	u32 focus;
++	s16 number_of_steps;
++	u8 res;
++	u8 type;
++	bool vcm_update;
++	enum vcm_type vcm;
++
++	bool has_vcm;
++
++	struct v4l2_ctrl *hblank;
++};
++
++enum ov5693_tok_type {
++	OV5693_8BIT  = 0x0001,
++	OV5693_16BIT = 0x0002,
++	OV5693_32BIT = 0x0004,
++	OV5693_TOK_TERM   = 0xf000,	/* terminating token for reg list */
++	OV5693_TOK_DELAY  = 0xfe00,	/* delay token for reg list */
++	OV5693_TOK_MASK = 0xfff0
++};
++
++/**
++ * struct ov5693_reg - OV5693 sensor register format
++ * @type: type of the register
++ * @reg: 16-bit offset to register
++ * @val: 8/16/32-bit register value
++ *
++ * Define a structure for sensor register initialization values
++ */
++struct ov5693_reg {
++	enum ov5693_tok_type type;
++	u16 reg;
++	u32 val;	/* @set value for read/mod/write, @mask */
++};
++
++#define to_ov5693_sensor(x) container_of(x, struct ov5693_device, sd)
++
++#define OV5693_MAX_WRITE_BUF_SIZE	30
++
++struct ov5693_write_buffer {
++	u16 addr;
++	u8 data[OV5693_MAX_WRITE_BUF_SIZE];
++};
++
++struct ov5693_write_ctrl {
++	int index;
++	struct ov5693_write_buffer buffer;
++};
++
++static struct ov5693_reg const ov5693_global_setting[] = {
++	{OV5693_8BIT, 0x0103, 0x01},
++	{OV5693_8BIT, 0x3001, 0x0a},
++	{OV5693_8BIT, 0x3002, 0x80},
++	{OV5693_8BIT, 0x3006, 0x00},
++	{OV5693_8BIT, 0x3011, 0x21},
++	{OV5693_8BIT, 0x3012, 0x09},
++	{OV5693_8BIT, 0x3013, 0x10},
++	{OV5693_8BIT, 0x3014, 0x00},
++	{OV5693_8BIT, 0x3015, 0x08},
++	{OV5693_8BIT, 0x3016, 0xf0},
++	{OV5693_8BIT, 0x3017, 0xf0},
++	{OV5693_8BIT, 0x3018, 0xf0},
++	{OV5693_8BIT, 0x301b, 0xb4},
++	{OV5693_8BIT, 0x301d, 0x02},
++	{OV5693_8BIT, 0x3021, 0x00},
++	{OV5693_8BIT, 0x3022, 0x01},
++	{OV5693_8BIT, 0x3028, 0x44},
++	{OV5693_8BIT, 0x3098, 0x02},
++	{OV5693_8BIT, 0x3099, 0x19},
++	{OV5693_8BIT, 0x309a, 0x02},
++	{OV5693_8BIT, 0x309b, 0x01},
++	{OV5693_8BIT, 0x309c, 0x00},
++	{OV5693_8BIT, 0x30a0, 0xd2},
++	{OV5693_8BIT, 0x30a2, 0x01},
++	{OV5693_8BIT, 0x30b2, 0x00},
++	{OV5693_8BIT, 0x30b3, 0x7d},
++	{OV5693_8BIT, 0x30b4, 0x03},
++	{OV5693_8BIT, 0x30b5, 0x04},
++	{OV5693_8BIT, 0x30b6, 0x01},
++	{OV5693_8BIT, 0x3104, 0x21},
++	{OV5693_8BIT, 0x3106, 0x00},
++	{OV5693_8BIT, 0x3400, 0x04},
++	{OV5693_8BIT, 0x3401, 0x00},
++	{OV5693_8BIT, 0x3402, 0x04},
++	{OV5693_8BIT, 0x3403, 0x00},
++	{OV5693_8BIT, 0x3404, 0x04},
++	{OV5693_8BIT, 0x3405, 0x00},
++	{OV5693_8BIT, 0x3406, 0x01},
++	{OV5693_8BIT, 0x3500, 0x00},
++	{OV5693_8BIT, 0x3503, 0x07},
++	{OV5693_8BIT, 0x3504, 0x00},
++	{OV5693_8BIT, 0x3505, 0x00},
++	{OV5693_8BIT, 0x3506, 0x00},
++	{OV5693_8BIT, 0x3507, 0x02},
++	{OV5693_8BIT, 0x3508, 0x00},
++	{OV5693_8BIT, 0x3509, 0x10},
++	{OV5693_8BIT, 0x350a, 0x00},
++	{OV5693_8BIT, 0x350b, 0x40},
++	{OV5693_8BIT, 0x3601, 0x0a},
++	{OV5693_8BIT, 0x3602, 0x38},
++	{OV5693_8BIT, 0x3612, 0x80},
++	{OV5693_8BIT, 0x3620, 0x54},
++	{OV5693_8BIT, 0x3621, 0xc7},
++	{OV5693_8BIT, 0x3622, 0x0f},
++	{OV5693_8BIT, 0x3625, 0x10},
++	{OV5693_8BIT, 0x3630, 0x55},
++	{OV5693_8BIT, 0x3631, 0xf4},
++	{OV5693_8BIT, 0x3632, 0x00},
++	{OV5693_8BIT, 0x3633, 0x34},
++	{OV5693_8BIT, 0x3634, 0x02},
++	{OV5693_8BIT, 0x364d, 0x0d},
++	{OV5693_8BIT, 0x364f, 0xdd},
++	{OV5693_8BIT, 0x3660, 0x04},
++	{OV5693_8BIT, 0x3662, 0x10},
++	{OV5693_8BIT, 0x3663, 0xf1},
++	{OV5693_8BIT, 0x3665, 0x00},
++	{OV5693_8BIT, 0x3666, 0x20},
++	{OV5693_8BIT, 0x3667, 0x00},
++	{OV5693_8BIT, 0x366a, 0x80},
++	{OV5693_8BIT, 0x3680, 0xe0},
++	{OV5693_8BIT, 0x3681, 0x00},
++	{OV5693_8BIT, 0x3700, 0x42},
++	{OV5693_8BIT, 0x3701, 0x14},
++	{OV5693_8BIT, 0x3702, 0xa0},
++	{OV5693_8BIT, 0x3703, 0xd8},
++	{OV5693_8BIT, 0x3704, 0x78},
++	{OV5693_8BIT, 0x3705, 0x02},
++	{OV5693_8BIT, 0x370a, 0x00},
++	{OV5693_8BIT, 0x370b, 0x20},
++	{OV5693_8BIT, 0x370c, 0x0c},
++	{OV5693_8BIT, 0x370d, 0x11},
++	{OV5693_8BIT, 0x370e, 0x00},
++	{OV5693_8BIT, 0x370f, 0x40},
++	{OV5693_8BIT, 0x3710, 0x00},
++	{OV5693_8BIT, 0x371a, 0x1c},
++	{OV5693_8BIT, 0x371b, 0x05},
++	{OV5693_8BIT, 0x371c, 0x01},
++	{OV5693_8BIT, 0x371e, 0xa1},
++	{OV5693_8BIT, 0x371f, 0x0c},
++	{OV5693_8BIT, 0x3721, 0x00},
++	{OV5693_8BIT, 0x3724, 0x10},
++	{OV5693_8BIT, 0x3726, 0x00},
++	{OV5693_8BIT, 0x372a, 0x01},
++	{OV5693_8BIT, 0x3730, 0x10},
++	{OV5693_8BIT, 0x3738, 0x22},
++	{OV5693_8BIT, 0x3739, 0xe5},
++	{OV5693_8BIT, 0x373a, 0x50},
++	{OV5693_8BIT, 0x373b, 0x02},
++	{OV5693_8BIT, 0x373c, 0x41},
++	{OV5693_8BIT, 0x373f, 0x02},
++	{OV5693_8BIT, 0x3740, 0x42},
++	{OV5693_8BIT, 0x3741, 0x02},
++	{OV5693_8BIT, 0x3742, 0x18},
++	{OV5693_8BIT, 0x3743, 0x01},
++	{OV5693_8BIT, 0x3744, 0x02},
++	{OV5693_8BIT, 0x3747, 0x10},
++	{OV5693_8BIT, 0x374c, 0x04},
++	{OV5693_8BIT, 0x3751, 0xf0},
++	{OV5693_8BIT, 0x3752, 0x00},
++	{OV5693_8BIT, 0x3753, 0x00},
++	{OV5693_8BIT, 0x3754, 0xc0},
++	{OV5693_8BIT, 0x3755, 0x00},
++	{OV5693_8BIT, 0x3756, 0x1a},
++	{OV5693_8BIT, 0x3758, 0x00},
++	{OV5693_8BIT, 0x3759, 0x0f},
++	{OV5693_8BIT, 0x376b, 0x44},
++	{OV5693_8BIT, 0x375c, 0x04},
++	{OV5693_8BIT, 0x3774, 0x10},
++	{OV5693_8BIT, 0x3776, 0x00},
++	{OV5693_8BIT, 0x377f, 0x08},
++	{OV5693_8BIT, 0x3780, 0x22},
++	{OV5693_8BIT, 0x3781, 0x0c},
++	{OV5693_8BIT, 0x3784, 0x2c},
++	{OV5693_8BIT, 0x3785, 0x1e},
++	{OV5693_8BIT, 0x378f, 0xf5},
++	{OV5693_8BIT, 0x3791, 0xb0},
++	{OV5693_8BIT, 0x3795, 0x00},
++	{OV5693_8BIT, 0x3796, 0x64},
++	{OV5693_8BIT, 0x3797, 0x11},
++	{OV5693_8BIT, 0x3798, 0x30},
++	{OV5693_8BIT, 0x3799, 0x41},
++	{OV5693_8BIT, 0x379a, 0x07},
++	{OV5693_8BIT, 0x379b, 0xb0},
++	{OV5693_8BIT, 0x379c, 0x0c},
++	{OV5693_8BIT, 0x37c5, 0x00},
++	{OV5693_8BIT, 0x37c6, 0x00},
++	{OV5693_8BIT, 0x37c7, 0x00},
++	{OV5693_8BIT, 0x37c9, 0x00},
++	{OV5693_8BIT, 0x37ca, 0x00},
++	{OV5693_8BIT, 0x37cb, 0x00},
++	{OV5693_8BIT, 0x37de, 0x00},
++	{OV5693_8BIT, 0x37df, 0x00},
++	{OV5693_8BIT, 0x3800, 0x00},
++	{OV5693_8BIT, 0x3801, 0x00},
++	{OV5693_8BIT, 0x3802, 0x00},
++	{OV5693_8BIT, 0x3804, 0x0a},
++	{OV5693_8BIT, 0x3805, 0x3f},
++	{OV5693_8BIT, 0x3810, 0x00},
++	{OV5693_8BIT, 0x3812, 0x00},
++	{OV5693_8BIT, 0x3823, 0x00},
++	{OV5693_8BIT, 0x3824, 0x00},
++	{OV5693_8BIT, 0x3825, 0x00},
++	{OV5693_8BIT, 0x3826, 0x00},
++	{OV5693_8BIT, 0x3827, 0x00},
++	{OV5693_8BIT, 0x382a, 0x04},
++	{OV5693_8BIT, 0x3a04, 0x06},
++	{OV5693_8BIT, 0x3a05, 0x14},
++	{OV5693_8BIT, 0x3a06, 0x00},
++	{OV5693_8BIT, 0x3a07, 0xfe},
++	{OV5693_8BIT, 0x3b00, 0x00},
++	{OV5693_8BIT, 0x3b02, 0x00},
++	{OV5693_8BIT, 0x3b03, 0x00},
++	{OV5693_8BIT, 0x3b04, 0x00},
++	{OV5693_8BIT, 0x3b05, 0x00},
++	{OV5693_8BIT, 0x3e07, 0x20},
++	{OV5693_8BIT, 0x4000, 0x08},
++	{OV5693_8BIT, 0x4001, 0x04},
++	{OV5693_8BIT, 0x4002, 0x45},
++	{OV5693_8BIT, 0x4004, 0x08},
++	{OV5693_8BIT, 0x4005, 0x18},
++	{OV5693_8BIT, 0x4006, 0x20},
++	{OV5693_8BIT, 0x4008, 0x24},
++	{OV5693_8BIT, 0x4009, 0x10},
++	{OV5693_8BIT, 0x400c, 0x00},
++	{OV5693_8BIT, 0x400d, 0x00},
++	{OV5693_8BIT, 0x4058, 0x00},
++	{OV5693_8BIT, 0x404e, 0x37},
++	{OV5693_8BIT, 0x404f, 0x8f},
++	{OV5693_8BIT, 0x4058, 0x00},
++	{OV5693_8BIT, 0x4101, 0xb2},
++	{OV5693_8BIT, 0x4303, 0x00},
++	{OV5693_8BIT, 0x4304, 0x08},
++	{OV5693_8BIT, 0x4307, 0x31},
++	{OV5693_8BIT, 0x4311, 0x04},
++	{OV5693_8BIT, 0x4315, 0x01},
++	{OV5693_8BIT, 0x4511, 0x05},
++	{OV5693_8BIT, 0x4512, 0x01},
++	{OV5693_8BIT, 0x4806, 0x00},
++	{OV5693_8BIT, 0x4816, 0x52},
++	{OV5693_8BIT, 0x481f, 0x30},
++	{OV5693_8BIT, 0x4826, 0x2c},
++	{OV5693_8BIT, 0x4831, 0x64},
++	{OV5693_8BIT, 0x4d00, 0x04},
++	{OV5693_8BIT, 0x4d01, 0x71},
++	{OV5693_8BIT, 0x4d02, 0xfd},
++	{OV5693_8BIT, 0x4d03, 0xf5},
++	{OV5693_8BIT, 0x4d04, 0x0c},
++	{OV5693_8BIT, 0x4d05, 0xcc},
++	{OV5693_8BIT, 0x4837, 0x0a},
++	{OV5693_8BIT, 0x5000, 0x06},
++	{OV5693_8BIT, 0x5001, 0x01},
++	{OV5693_8BIT, 0x5003, 0x20},
++	{OV5693_8BIT, 0x5046, 0x0a},
++	{OV5693_8BIT, 0x5013, 0x00},
++	{OV5693_8BIT, 0x5046, 0x0a},
++	{OV5693_8BIT, 0x5780, 0x1c},
++	{OV5693_8BIT, 0x5786, 0x20},
++	{OV5693_8BIT, 0x5787, 0x10},
++	{OV5693_8BIT, 0x5788, 0x18},
++	{OV5693_8BIT, 0x578a, 0x04},
++	{OV5693_8BIT, 0x578b, 0x02},
++	{OV5693_8BIT, 0x578c, 0x02},
++	{OV5693_8BIT, 0x578e, 0x06},
++	{OV5693_8BIT, 0x578f, 0x02},
++	{OV5693_8BIT, 0x5790, 0x02},
++	{OV5693_8BIT, 0x5791, 0xff},
++	{OV5693_8BIT, 0x5842, 0x01},
++	{OV5693_8BIT, 0x5843, 0x2b},
++	{OV5693_8BIT, 0x5844, 0x01},
++	{OV5693_8BIT, 0x5845, 0x92},
++	{OV5693_8BIT, 0x5846, 0x01},
++	{OV5693_8BIT, 0x5847, 0x8f},
++	{OV5693_8BIT, 0x5848, 0x01},
++	{OV5693_8BIT, 0x5849, 0x0c},
++	{OV5693_8BIT, 0x5e00, 0x00},
++	{OV5693_8BIT, 0x5e10, 0x0c},
++	{OV5693_8BIT, 0x0100, 0x00},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++#if ENABLE_NON_PREVIEW
++/*
++ * 654x496 30fps 17ms VBlanking 2lane 10Bit (Scaling)
++ */
++static struct ov5693_reg const ov5693_654x496[] = {
++	{OV5693_8BIT, 0x3501, 0x3d},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe6},
++	{OV5693_8BIT, 0x3709, 0xc7},
++	{OV5693_8BIT, 0x3803, 0x00},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0xa3},
++	{OV5693_8BIT, 0x3808, 0x02},
++	{OV5693_8BIT, 0x3809, 0x90},
++	{OV5693_8BIT, 0x380a, 0x01},
++	{OV5693_8BIT, 0x380b, 0xf0},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x08},
++	{OV5693_8BIT, 0x3813, 0x02},
++	{OV5693_8BIT, 0x3814, 0x31},
++	{OV5693_8BIT, 0x3815, 0x31},
++	{OV5693_8BIT, 0x3820, 0x04},
++	{OV5693_8BIT, 0x3821, 0x1f},
++	{OV5693_8BIT, 0x5002, 0x80},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++/*
++ * 1296x976 30fps 17ms VBlanking 2lane 10Bit (Scaling)
++*DS from 2592x1952
++*/
++static struct ov5693_reg const ov5693_1296x976[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++
++	{OV5693_8BIT, 0x3800, 0x00},
++	{OV5693_8BIT, 0x3801, 0x00},
++	{OV5693_8BIT, 0x3802, 0x00},
++	{OV5693_8BIT, 0x3803, 0x00},
++
++	{OV5693_8BIT, 0x3804, 0x0a},
++	{OV5693_8BIT, 0x3805, 0x3f},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0xA3},
++
++	{OV5693_8BIT, 0x3808, 0x05},
++	{OV5693_8BIT, 0x3809, 0x10},
++	{OV5693_8BIT, 0x380a, 0x03},
++	{OV5693_8BIT, 0x380b, 0xD0},
++
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++
++	{OV5693_8BIT, 0x3810, 0x00},
++	{OV5693_8BIT, 0x3811, 0x10},
++	{OV5693_8BIT, 0x3812, 0x00},
++	{OV5693_8BIT, 0x3813, 0x02},
++
++	{OV5693_8BIT, 0x3814, 0x11},	/*X subsample control*/
++	{OV5693_8BIT, 0x3815, 0x11},	/*Y subsample control*/
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++
++};
++
++/*
++ * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling)
++ DS from 2564x1956
++ */
++static struct ov5693_reg const ov5693_336x256[] = {
++	{OV5693_8BIT, 0x3501, 0x3d},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe6},
++	{OV5693_8BIT, 0x3709, 0xc7},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0xa3},
++	{OV5693_8BIT, 0x3808, 0x01},
++	{OV5693_8BIT, 0x3809, 0x50},
++	{OV5693_8BIT, 0x380a, 0x01},
++	{OV5693_8BIT, 0x380b, 0x00},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x1E},
++	{OV5693_8BIT, 0x3814, 0x31},
++	{OV5693_8BIT, 0x3815, 0x31},
++	{OV5693_8BIT, 0x3820, 0x04},
++	{OV5693_8BIT, 0x3821, 0x1f},
++	{OV5693_8BIT, 0x5002, 0x80},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++/*
++ * 368x304 30fps 17ms VBlanking 2lane 10Bit (Scaling)
++ DS from 2368x1956
++ */
++static struct ov5693_reg const ov5693_368x304[] = {
++	{OV5693_8BIT, 0x3501, 0x3d},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe6},
++	{OV5693_8BIT, 0x3709, 0xc7},
++	{OV5693_8BIT, 0x3808, 0x01},
++	{OV5693_8BIT, 0x3809, 0x70},
++	{OV5693_8BIT, 0x380a, 0x01},
++	{OV5693_8BIT, 0x380b, 0x30},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x80},
++	{OV5693_8BIT, 0x3814, 0x31},
++	{OV5693_8BIT, 0x3815, 0x31},
++	{OV5693_8BIT, 0x3820, 0x04},
++	{OV5693_8BIT, 0x3821, 0x1f},
++	{OV5693_8BIT, 0x5002, 0x80},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++/*
++ * ov5693_192x160 30fps 17ms VBlanking 2lane 10Bit (Scaling)
++ DS from 2460x1956
++ */
++static struct ov5693_reg const ov5693_192x160[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x80},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3804, 0x0a},
++	{OV5693_8BIT, 0x3805, 0x3f},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0xA3},
++	{OV5693_8BIT, 0x3808, 0x00},
++	{OV5693_8BIT, 0x3809, 0xC0},
++	{OV5693_8BIT, 0x380a, 0x00},
++	{OV5693_8BIT, 0x380b, 0xA0},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x40},
++	{OV5693_8BIT, 0x3813, 0x00},
++	{OV5693_8BIT, 0x3814, 0x31},
++	{OV5693_8BIT, 0x3815, 0x31},
++	{OV5693_8BIT, 0x3820, 0x04},
++	{OV5693_8BIT, 0x3821, 0x1f},
++	{OV5693_8BIT, 0x5002, 0x80},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++static struct ov5693_reg const ov5693_736x496[] = {
++	{OV5693_8BIT, 0x3501, 0x3d},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe6},
++	{OV5693_8BIT, 0x3709, 0xc7},
++	{OV5693_8BIT, 0x3803, 0x68},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0x3b},
++	{OV5693_8BIT, 0x3808, 0x02},
++	{OV5693_8BIT, 0x3809, 0xe0},
++	{OV5693_8BIT, 0x380a, 0x01},
++	{OV5693_8BIT, 0x380b, 0xf0},
++	{OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07}, /*vts*/
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x08},
++	{OV5693_8BIT, 0x3813, 0x02},
++	{OV5693_8BIT, 0x3814, 0x31},
++	{OV5693_8BIT, 0x3815, 0x31},
++	{OV5693_8BIT, 0x3820, 0x04},
++	{OV5693_8BIT, 0x3821, 0x1f},
++	{OV5693_8BIT, 0x5002, 0x80},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++#endif
++
++/*
++static struct ov5693_reg const ov5693_736x496[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe6},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3803, 0x00},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0xa3},
++	{OV5693_8BIT, 0x3808, 0x02},
++	{OV5693_8BIT, 0x3809, 0xe0},
++	{OV5693_8BIT, 0x380a, 0x01},
++	{OV5693_8BIT, 0x380b, 0xf0},
++	{OV5693_8BIT, 0x380c, 0x0d},
++	{OV5693_8BIT, 0x380d, 0xb0},
++	{OV5693_8BIT, 0x380e, 0x05},
++	{OV5693_8BIT, 0x380f, 0xf2},
++	{OV5693_8BIT, 0x3811, 0x08},
++	{OV5693_8BIT, 0x3813, 0x02},
++	{OV5693_8BIT, 0x3814, 0x31},
++	{OV5693_8BIT, 0x3815, 0x31},
++	{OV5693_8BIT, 0x3820, 0x01},
++	{OV5693_8BIT, 0x3821, 0x1f},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++*/
++/*
++ * 976x556 30fps 8.8ms VBlanking 2lane 10Bit (Scaling)
++ */
++#if ENABLE_NON_PREVIEW
++static struct ov5693_reg const ov5693_976x556[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3803, 0xf0},
++	{OV5693_8BIT, 0x3806, 0x06},
++	{OV5693_8BIT, 0x3807, 0xa7},
++	{OV5693_8BIT, 0x3808, 0x03},
++	{OV5693_8BIT, 0x3809, 0xd0},
++	{OV5693_8BIT, 0x380a, 0x02},
++	{OV5693_8BIT, 0x380b, 0x2C},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x10},
++	{OV5693_8BIT, 0x3813, 0x02},
++	{OV5693_8BIT, 0x3814, 0x11},
++	{OV5693_8BIT, 0x3815, 0x11},
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x80},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++/*DS from 2624x1492*/
++static struct ov5693_reg const ov5693_1296x736[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++
++	{OV5693_8BIT, 0x3800, 0x00},
++	{OV5693_8BIT, 0x3801, 0x00},
++	{OV5693_8BIT, 0x3802, 0x00},
++	{OV5693_8BIT, 0x3803, 0x00},
++
++	{OV5693_8BIT, 0x3804, 0x0a},
++	{OV5693_8BIT, 0x3805, 0x3f},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0xA3},
++
++	{OV5693_8BIT, 0x3808, 0x05},
++	{OV5693_8BIT, 0x3809, 0x10},
++	{OV5693_8BIT, 0x380a, 0x02},
++	{OV5693_8BIT, 0x380b, 0xe0},
++
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++
++	{OV5693_8BIT, 0x3813, 0xE8},
++
++	{OV5693_8BIT, 0x3814, 0x11},	/*X subsample control*/
++	{OV5693_8BIT, 0x3815, 0x11},	/*Y subsample control*/
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++static struct ov5693_reg const ov5693_1636p_30fps[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3803, 0xf0},
++	{OV5693_8BIT, 0x3806, 0x06},
++	{OV5693_8BIT, 0x3807, 0xa7},
++	{OV5693_8BIT, 0x3808, 0x06},
++	{OV5693_8BIT, 0x3809, 0x64},
++	{OV5693_8BIT, 0x380a, 0x04},
++	{OV5693_8BIT, 0x380b, 0x48},
++	{OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07}, /*vts*/
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x02},
++	{OV5693_8BIT, 0x3813, 0x02},
++	{OV5693_8BIT, 0x3814, 0x11},
++	{OV5693_8BIT, 0x3815, 0x11},
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x80},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++#endif
++
++static struct ov5693_reg const ov5693_1616x1216_30fps[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x80},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3800, 0x00},	/*{3800,3801} Array X start*/
++	{OV5693_8BIT, 0x3801, 0x08},	/* 04 //{3800,3801} Array X start*/
++	{OV5693_8BIT, 0x3802, 0x00},	/*{3802,3803} Array Y start*/
++	{OV5693_8BIT, 0x3803, 0x04},	/* 00  //{3802,3803} Array Y start*/
++	{OV5693_8BIT, 0x3804, 0x0a},	/*{3804,3805} Array X end*/
++	{OV5693_8BIT, 0x3805, 0x37},	/* 3b  //{3804,3805} Array X end*/
++	{OV5693_8BIT, 0x3806, 0x07},	/*{3806,3807} Array Y end*/
++	{OV5693_8BIT, 0x3807, 0x9f},	/* a3  //{3806,3807} Array Y end*/
++	{OV5693_8BIT, 0x3808, 0x06},	/*{3808,3809} Final output H size*/
++	{OV5693_8BIT, 0x3809, 0x50},	/*{3808,3809} Final output H size*/
++	{OV5693_8BIT, 0x380a, 0x04},	/*{380a,380b} Final output V size*/
++	{OV5693_8BIT, 0x380b, 0xc0},	/*{380a,380b} Final output V size*/
++	{OV5693_8BIT, 0x380c, 0x0a},	/*{380c,380d} HTS*/
++	{OV5693_8BIT, 0x380d, 0x80},	/*{380c,380d} HTS*/
++	{OV5693_8BIT, 0x380e, 0x07},	/*{380e,380f} VTS*/
++	{OV5693_8BIT, 0x380f, 0xc0},	/* bc	//{380e,380f} VTS*/
++	{OV5693_8BIT, 0x3810, 0x00},	/*{3810,3811} windowing X offset*/
++	{OV5693_8BIT, 0x3811, 0x10},	/*{3810,3811} windowing X offset*/
++	{OV5693_8BIT, 0x3812, 0x00},	/*{3812,3813} windowing Y offset*/
++	{OV5693_8BIT, 0x3813, 0x06},	/*{3812,3813} windowing Y offset*/
++	{OV5693_8BIT, 0x3814, 0x11},	/*X subsample control*/
++	{OV5693_8BIT, 0x3815, 0x11},	/*Y subsample control*/
++	{OV5693_8BIT, 0x3820, 0x00},	/*FLIP/Binnning control*/
++	{OV5693_8BIT, 0x3821, 0x1e},	/*MIRROR control*/
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_8BIT, 0x5041, 0x84},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++/*
++ * 1940x1096 30fps 8.8ms VBlanking 2lane 10bit (Scaling)
++ */
++#if ENABLE_NON_PREVIEW
++static struct ov5693_reg const ov5693_1940x1096[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3803, 0xf0},
++	{OV5693_8BIT, 0x3806, 0x06},
++	{OV5693_8BIT, 0x3807, 0xa7},
++	{OV5693_8BIT, 0x3808, 0x07},
++	{OV5693_8BIT, 0x3809, 0x94},
++	{OV5693_8BIT, 0x380a, 0x04},
++	{OV5693_8BIT, 0x380b, 0x48},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x02},
++	{OV5693_8BIT, 0x3813, 0x02},
++	{OV5693_8BIT, 0x3814, 0x11},
++	{OV5693_8BIT, 0x3815, 0x11},
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x80},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++static struct ov5693_reg const ov5693_2592x1456_30fps[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3800, 0x00},
++	{OV5693_8BIT, 0x3801, 0x00},
++	{OV5693_8BIT, 0x3802, 0x00},
++	{OV5693_8BIT, 0x3803, 0xf0},
++	{OV5693_8BIT, 0x3804, 0x0a},
++	{OV5693_8BIT, 0x3805, 0x3f},
++	{OV5693_8BIT, 0x3806, 0x06},
++	{OV5693_8BIT, 0x3807, 0xa4},
++	{OV5693_8BIT, 0x3808, 0x0a},
++	{OV5693_8BIT, 0x3809, 0x20},
++	{OV5693_8BIT, 0x380a, 0x05},
++	{OV5693_8BIT, 0x380b, 0xb0},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x10},
++	{OV5693_8BIT, 0x3813, 0x00},
++	{OV5693_8BIT, 0x3814, 0x11},
++	{OV5693_8BIT, 0x3815, 0x11},
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_TOK_TERM, 0, 0}
++};
++#endif
++
++static struct ov5693_reg const ov5693_2576x1456_30fps[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3800, 0x00},
++	{OV5693_8BIT, 0x3801, 0x00},
++	{OV5693_8BIT, 0x3802, 0x00},
++	{OV5693_8BIT, 0x3803, 0xf0},
++	{OV5693_8BIT, 0x3804, 0x0a},
++	{OV5693_8BIT, 0x3805, 0x3f},
++	{OV5693_8BIT, 0x3806, 0x06},
++	{OV5693_8BIT, 0x3807, 0xa4},
++	{OV5693_8BIT, 0x3808, 0x0a},
++	{OV5693_8BIT, 0x3809, 0x10},
++	{OV5693_8BIT, 0x380a, 0x05},
++	{OV5693_8BIT, 0x380b, 0xb0},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x18},
++	{OV5693_8BIT, 0x3813, 0x00},
++	{OV5693_8BIT, 0x3814, 0x11},
++	{OV5693_8BIT, 0x3815, 0x11},
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++/*
++ * 2592x1944 30fps 0.6ms VBlanking 2lane 10Bit
++ */
++#if ENABLE_NON_PREVIEW
++static struct ov5693_reg const ov5693_2592x1944_30fps[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3803, 0x00},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0xa3},
++	{OV5693_8BIT, 0x3808, 0x0a},
++	{OV5693_8BIT, 0x3809, 0x20},
++	{OV5693_8BIT, 0x380a, 0x07},
++	{OV5693_8BIT, 0x380b, 0x98},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x10},
++	{OV5693_8BIT, 0x3813, 0x00},
++	{OV5693_8BIT, 0x3814, 0x11},
++	{OV5693_8BIT, 0x3815, 0x11},
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++#endif
++
++/*
++ * 11:9 Full FOV Output, expected FOV Res: 2346x1920
++ * ISP Effect Res: 1408x1152
++ * Sensor out: 1424x1168, DS From: 2380x1952
++ *
++ * WA: Left Offset: 8, Hor scal: 64
++ */
++#if ENABLE_NON_PREVIEW
++static struct ov5693_reg const ov5693_1424x1168_30fps[] = {
++	{OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
++	{OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
++	{OV5693_8BIT, 0x3801, 0x50}, /* 80 */
++	{OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
++	{OV5693_8BIT, 0x3803, 0x02}, /* 2 */
++	{OV5693_8BIT, 0x3804, 0x09}, /* TIMING_X_ADDR_END */
++	{OV5693_8BIT, 0x3805, 0xdd}, /* 2525 */
++	{OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
++	{OV5693_8BIT, 0x3807, 0xa1}, /* 1953 */
++	{OV5693_8BIT, 0x3808, 0x05}, /* TIMING_X_OUTPUT_SIZE */
++	{OV5693_8BIT, 0x3809, 0x90}, /* 1424 */
++	{OV5693_8BIT, 0x380a, 0x04}, /* TIMING_Y_OUTPUT_SIZE */
++	{OV5693_8BIT, 0x380b, 0x90}, /* 1168 */
++	{OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
++	{OV5693_8BIT, 0x3811, 0x02}, /* 2 */
++	{OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
++	{OV5693_8BIT, 0x3813, 0x00}, /* 0 */
++	{OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
++	{OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++#endif
++
++/*
++ * 3:2 Full FOV Output, expected FOV Res: 2560x1706
++ * ISP Effect Res: 720x480
++ * Sensor out: 736x496, DS From 2616x1764
++ */
++static struct ov5693_reg const ov5693_736x496_30fps[] = {
++	{OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
++	{OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
++	{OV5693_8BIT, 0x3801, 0x02}, /* 2 */
++	{OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
++	{OV5693_8BIT, 0x3803, 0x62}, /* 98 */
++	{OV5693_8BIT, 0x3804, 0x0a}, /* TIMING_X_ADDR_END */
++	{OV5693_8BIT, 0x3805, 0x3b}, /* 2619 */
++	{OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
++	{OV5693_8BIT, 0x3807, 0x43}, /* 1859 */
++	{OV5693_8BIT, 0x3808, 0x02}, /* TIMING_X_OUTPUT_SIZE */
++	{OV5693_8BIT, 0x3809, 0xe0}, /* 736 */
++	{OV5693_8BIT, 0x380a, 0x01}, /* TIMING_Y_OUTPUT_SIZE */
++	{OV5693_8BIT, 0x380b, 0xf0}, /* 496 */
++	{OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
++	{OV5693_8BIT, 0x3811, 0x02}, /* 2 */
++	{OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
++	{OV5693_8BIT, 0x3813, 0x00}, /* 0 */
++	{OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
++	{OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++static struct ov5693_reg const ov5693_2576x1936_30fps[] = {
++	{OV5693_8BIT, 0x3501, 0x7b},
++	{OV5693_8BIT, 0x3502, 0x00},
++	{OV5693_8BIT, 0x3708, 0xe2},
++	{OV5693_8BIT, 0x3709, 0xc3},
++	{OV5693_8BIT, 0x3803, 0x00},
++	{OV5693_8BIT, 0x3806, 0x07},
++	{OV5693_8BIT, 0x3807, 0xa3},
++	{OV5693_8BIT, 0x3808, 0x0a},
++	{OV5693_8BIT, 0x3809, 0x10},
++	{OV5693_8BIT, 0x380a, 0x07},
++	{OV5693_8BIT, 0x380b, 0x90},
++	{OV5693_8BIT, 0x380c, 0x0a},
++	{OV5693_8BIT, 0x380d, 0x80},
++	{OV5693_8BIT, 0x380e, 0x07},
++	{OV5693_8BIT, 0x380f, 0xc0},
++	{OV5693_8BIT, 0x3811, 0x18},
++	{OV5693_8BIT, 0x3813, 0x00},
++	{OV5693_8BIT, 0x3814, 0x11},
++	{OV5693_8BIT, 0x3815, 0x11},
++	{OV5693_8BIT, 0x3820, 0x00},
++	{OV5693_8BIT, 0x3821, 0x1e},
++	{OV5693_8BIT, 0x5002, 0x00},
++	{OV5693_8BIT, 0x0100, 0x01},
++	{OV5693_TOK_TERM, 0, 0}
++};
++
++static struct ov5693_resolution ov5693_res_preview[] = {
++	{
++		.desc = "ov5693_736x496_30fps",
++		.width = 736,
++		.height = 496,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_736x496_30fps,
++	},
++	{
++		.desc = "ov5693_1616x1216_30fps",
++		.width = 1616,
++		.height = 1216,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_1616x1216_30fps,
++	},
++	{
++		.desc = "ov5693_5M_30fps",
++		.width = 2576,
++		.height = 1456,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_2576x1456_30fps,
++	},
++	{
++		.desc = "ov5693_5M_30fps",
++		.width = 2576,
++		.height = 1936,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_2576x1936_30fps,
++	},
++};
++
++#define N_RES_PREVIEW (ARRAY_SIZE(ov5693_res_preview))
++
++/*
++ * Disable non-preview configurations until the configuration selection is
++ * improved.
++ */
++#if ENABLE_NON_PREVIEW
++struct ov5693_resolution ov5693_res_still[] = {
++	{
++		.desc = "ov5693_736x496_30fps",
++		.width = 736,
++		.height = 496,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_736x496_30fps,
++	},
++	{
++		.desc = "ov5693_1424x1168_30fps",
++		.width = 1424,
++		.height = 1168,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_1424x1168_30fps,
++	},
++	{
++		.desc = "ov5693_1616x1216_30fps",
++		.width = 1616,
++		.height = 1216,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_1616x1216_30fps,
++	},
++	{
++		.desc = "ov5693_5M_30fps",
++		.width = 2592,
++		.height = 1456,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_2592x1456_30fps,
++	},
++	{
++		.desc = "ov5693_5M_30fps",
++		.width = 2592,
++		.height = 1944,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_2592x1944_30fps,
++	},
++};
++
++#define N_RES_STILL (ARRAY_SIZE(ov5693_res_still))
++
++struct ov5693_resolution ov5693_res_video[] = {
++	{
++		.desc = "ov5693_736x496_30fps",
++		.width = 736,
++		.height = 496,
++		.fps = 30,
++		.pix_clk_freq = 160,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 2,
++		.bin_factor_y = 2,
++		.bin_mode = 1,
++		.regs = ov5693_736x496,
++	},
++	{
++		.desc = "ov5693_336x256_30fps",
++		.width = 336,
++		.height = 256,
++		.fps = 30,
++		.pix_clk_freq = 160,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 2,
++		.bin_factor_y = 2,
++		.bin_mode = 1,
++		.regs = ov5693_336x256,
++	},
++	{
++		.desc = "ov5693_368x304_30fps",
++		.width = 368,
++		.height = 304,
++		.fps = 30,
++		.pix_clk_freq = 160,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 2,
++		.bin_factor_y = 2,
++		.bin_mode = 1,
++		.regs = ov5693_368x304,
++	},
++	{
++		.desc = "ov5693_192x160_30fps",
++		.width = 192,
++		.height = 160,
++		.fps = 30,
++		.pix_clk_freq = 160,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 2,
++		.bin_factor_y = 2,
++		.bin_mode = 1,
++		.regs = ov5693_192x160,
++	},
++	{
++		.desc = "ov5693_1296x736_30fps",
++		.width = 1296,
++		.height = 736,
++		.fps = 30,
++		.pix_clk_freq = 160,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 2,
++		.bin_factor_y = 2,
++		.bin_mode = 0,
++		.regs = ov5693_1296x736,
++	},
++	{
++		.desc = "ov5693_1296x976_30fps",
++		.width = 1296,
++		.height = 976,
++		.fps = 30,
++		.pix_clk_freq = 160,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 2,
++		.bin_factor_y = 2,
++		.bin_mode = 0,
++		.regs = ov5693_1296x976,
++	},
++	{
++		.desc = "ov5693_1636P_30fps",
++		.width = 1636,
++		.height = 1096,
++		.fps = 30,
++		.pix_clk_freq = 160,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_1636p_30fps,
++	},
++	{
++		.desc = "ov5693_1080P_30fps",
++		.width = 1940,
++		.height = 1096,
++		.fps = 30,
++		.pix_clk_freq = 160,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_1940x1096,
++	},
++	{
++		.desc = "ov5693_5M_30fps",
++		.width = 2592,
++		.height = 1456,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_2592x1456_30fps,
++	},
++	{
++		.desc = "ov5693_5M_30fps",
++		.width = 2592,
++		.height = 1944,
++		.pix_clk_freq = 160,
++		.fps = 30,
++		.used = 0,
++		.pixels_per_line = 2688,
++		.lines_per_frame = 1984,
++		.bin_factor_x = 1,
++		.bin_factor_y = 1,
++		.bin_mode = 0,
++		.regs = ov5693_2592x1944_30fps,
++		.crop = {
++			.left = 0,
++			.top = 0,
++			.width = 2592,
++			.height = 1944
++		},
++	},
++};
++
++#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video))
++#endif
++
++static struct ov5693_resolution *ov5693_res = ov5693_res_video;
++static unsigned long N_RES = N_RES_VIDEO;
++#endif
+-- 
+2.30.1
+
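The ov5693_reg tables added above are terminated by an OV5693_TOK_TERM entry and may contain OV5693_TOK_DELAY entries, as documented in the struct ov5693_reg kernel-doc. A minimal sketch of how such a list is typically applied follows; ov5693_write_reg() is a hypothetical stand-in for the driver's actual I2C write helper, which is not shown in this hunk.

#include <linux/delay.h>
#include <linux/i2c.h>

/*
 * Sketch only: apply a register list in the format defined above.
 * struct ov5693_reg, OV5693_TOK_TERM and OV5693_TOK_DELAY come from the
 * patch; ov5693_write_reg() is an assumed helper, not quoted code.
 */
static int ov5693_write_reg_list(struct i2c_client *client,
				 const struct ov5693_reg *list)
{
	int ret;

	for (; list->type != OV5693_TOK_TERM; list++) {
		if (list->type == OV5693_TOK_DELAY) {
			/* Delay value assumed to be in milliseconds. */
			msleep(list->val);
			continue;
		}

		ret = ov5693_write_reg(client, list->type, list->reg,
				       list->val);
		if (ret)
			return ret;
	}

	return 0;
}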
+From 1a0606e05a713465f2d4396b926448029cb383ab Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sun, 17 Jan 2021 19:08:18 +0000
+Subject: [PATCH] media: i2c: Add reset pin toggling to ov5693
+
+The ov5693 has an xshutdown pin which may be present and, if so, needs
+toggling as part of the power-on sequence.
+
+Add calls to handle the reset GPIO.
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/media/i2c/ov5693.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
+index 32485e4ed42b..f9ced52ad37a 100644
+--- a/drivers/media/i2c/ov5693.c
++++ b/drivers/media/i2c/ov5693.c
+@@ -1085,6 +1085,8 @@ static int __power_up(struct v4l2_subdev *sd)
+ 	if (ret)
+ 		goto fail_power;
+ 
++	gpiod_set_value_cansleep(sensor->reset, 0);
++
+ 	__cci_delay(up_delay);
+ 
+ 	return 0;
+@@ -1103,6 +1105,8 @@ static int power_down(struct v4l2_subdev *sd)
+ 
+ 	dev->focus = OV5693_INVALID_CONFIG;
+ 
++	gpiod_set_value_cansleep(sensor->reset, 1);
++
+ 	clk_disable_unprepare(dev->clk);
+ 
+ 	if (dev->indicator_led)
+-- 
+2.30.1
+
+From 8483265d4a23150a773fd0fb760f2bf8c5307426 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sun, 17 Jan 2021 21:39:15 +0000
+Subject: [PATCH] media: i2c: Fix misnamed variable in power_down() for ov5693
+
+Fix the misnamed variable in gpiod_set_value_cansleep().
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ drivers/media/i2c/ov5693.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
+index f9ced52ad37a..9fd44a3d1d85 100644
+--- a/drivers/media/i2c/ov5693.c
++++ b/drivers/media/i2c/ov5693.c
+@@ -1105,7 +1105,7 @@ static int power_down(struct v4l2_subdev *sd)
+ 
+ 	dev->focus = OV5693_INVALID_CONFIG;
+ 
+-	gpiod_set_value_cansleep(sensor->reset, 1);
++	gpiod_set_value_cansleep(dev->reset, 1);
+ 
+ 	clk_disable_unprepare(dev->clk);
+ 
+-- 
+2.30.1
+
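The two patches above toggle dev->reset during power-up and power-down but do not show where the descriptor comes from. For context only, a minimal sketch of how such an optional reset GPIO is usually acquired at probe time; the "reset" con_id and the GPIOD_OUT_HIGH initial state (sensor held in reset until power-up) are assumptions, not taken from these patches.

#include <linux/gpio/consumer.h>
#include <linux/i2c.h>

/*
 * Sketch only: acquire the optional reset GPIO that __power_up() and
 * power_down() toggle. The "reset" con_id and GPIOD_OUT_HIGH initial
 * state are assumed.
 */
static int ov5693_get_reset_gpio(struct ov5693_device *dev,
				 struct i2c_client *client)
{
	dev->reset = devm_gpiod_get_optional(&client->dev, "reset",
					     GPIOD_OUT_HIGH);
	if (IS_ERR(dev->reset)) {
		dev_err(&client->dev, "failed to get reset GPIO\n");
		return PTR_ERR(dev->reset);
	}

	return 0;
}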
+From d90e95ac0ba73580c98af561a74152d538d7fbac Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fabian=20W=C3=BCthrich?= <me@fabwu.ch>
+Date: Fri, 22 Jan 2021 20:58:13 +0100
+Subject: [PATCH] cio2-bridge: Parse sensor orientation and rotation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The sensor orientation is read from the _PLD ACPI buffer and converted
+to a v4l2 format.
+
+See https://uefi.org/sites/default/files/resources/ACPI_6_3_final_Jan30.pdf
+page 351 for a definition of the Panel property.
+
+The sensor rotation is read from the SSDB ACPI buffer and converted into
+degrees.
+
+Signed-off-by: Fabian Wüthrich <me@fabwu.ch>
+Patchset: cameras
+---
+ drivers/media/pci/intel/ipu3/cio2-bridge.c | 45 ++++++++++++++++++++--
+ drivers/media/pci/intel/ipu3/cio2-bridge.h |  3 ++
+ 2 files changed, 44 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+index 143f3c0f445e..806d4e5fc177 100644
+--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
++++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+@@ -29,6 +29,7 @@ static const struct cio2_sensor_config cio2_supported_sensors[] = {
+ static const struct cio2_property_names prop_names = {
+ 	.clock_frequency = "clock-frequency",
+ 	.rotation = "rotation",
++	.orientation = "orientation",
+ 	.bus_type = "bus-type",
+ 	.data_lanes = "data-lanes",
+ 	.remote_endpoint = "remote-endpoint",
+@@ -72,11 +73,36 @@ static int cio2_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
+ 	return ret;
+ }
+ 
++static u32 cio2_bridge_parse_rotation(u8 rotation)
++{
++	if (rotation == 1)
++		return 180;
++	return 0;
++}
++
++static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(u8 panel)
++{
++	switch (panel) {
++	case 4:
++		return V4L2_FWNODE_ORIENTATION_FRONT;
++	case 5:
++		return V4L2_FWNODE_ORIENTATION_BACK;
++	default:
++		return V4L2_FWNODE_ORIENTATION_EXTERNAL;
++	}
++}
++
+ static void cio2_bridge_create_fwnode_properties(
+ 	struct cio2_sensor *sensor,
+ 	struct cio2_bridge *bridge,
+ 	const struct cio2_sensor_config *cfg)
+ {
++	u32 rotation;
++	enum v4l2_fwnode_orientation orientation;
++
++	rotation = cio2_bridge_parse_rotation(sensor->ssdb.degree);
++	orientation = cio2_bridge_parse_orientation(sensor->pld->panel);
++
+ 	sensor->prop_names = prop_names;
+ 
+ 	sensor->local_ref[0].node = &sensor->swnodes[SWNODE_CIO2_ENDPOINT];
+@@ -85,9 +111,12 @@ static void cio2_bridge_create_fwnode_properties(
+ 	sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
+ 					sensor->prop_names.clock_frequency,
+ 					sensor->ssdb.mclkspeed);
+-	sensor->dev_properties[1] = PROPERTY_ENTRY_U8(
++	sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
+ 					sensor->prop_names.rotation,
+-					sensor->ssdb.degree);
++					rotation);
++	sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
++					sensor->prop_names.orientation,
++					orientation);
+ 
+ 	sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
+ 					sensor->prop_names.bus_type,
+@@ -159,6 +188,7 @@ static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
+ 	for (i = 0; i < bridge->n_sensors; i++) {
+ 		sensor = &bridge->sensors[i];
+ 		software_node_unregister_nodes(sensor->swnodes);
++		ACPI_FREE(sensor->pld);
+ 		acpi_dev_put(sensor->adev);
+ 	}
+ }
+@@ -170,6 +200,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
+ 	struct fwnode_handle *fwnode;
+ 	struct cio2_sensor *sensor;
+ 	struct acpi_device *adev;
++	acpi_status status;
+ 	int ret;
+ 
+ 	for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+@@ -193,11 +224,15 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
+ 		if (ret)
+ 			goto err_put_adev;
+ 
++		status = acpi_get_physical_device_location(adev->handle, &sensor->pld);
++		if (ACPI_FAILURE(status))
++			goto err_put_adev;
++
+ 		if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
+ 			dev_err(&adev->dev,
+ 				"Number of lanes in SSDB is invalid\n");
+ 			ret = -EINVAL;
+-			goto err_put_adev;
++			goto err_free_pld;
+ 		}
+ 
+ 		cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
+@@ -205,7 +240,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
+ 
+ 		ret = software_node_register_nodes(sensor->swnodes);
+ 		if (ret)
+-			goto err_put_adev;
++			goto err_free_pld;
+ 
+ 		fwnode = software_node_fwnode(&sensor->swnodes[SWNODE_SENSOR_HID]);
+ 		if (!fwnode) {
+@@ -225,6 +260,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
+ 
+ err_free_swnodes:
+ 	software_node_unregister_nodes(sensor->swnodes);
++err_free_pld:
++	ACPI_FREE(sensor->pld);
+ err_put_adev:
+ 	acpi_dev_put(sensor->adev);
+ err_out:
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
+index dd0ffcafa489..924d99d20328 100644
+--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
++++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
+@@ -80,6 +80,7 @@ struct cio2_sensor_ssdb {
+ struct cio2_property_names {
+ 	char clock_frequency[16];
+ 	char rotation[9];
++	char orientation[12];
+ 	char bus_type[9];
+ 	char data_lanes[11];
+ 	char remote_endpoint[16];
+@@ -106,6 +107,8 @@ struct cio2_sensor {
+ 	struct cio2_node_names node_names;
+ 
+ 	struct cio2_sensor_ssdb ssdb;
++	struct acpi_pld_info *pld;
++
+ 	struct cio2_property_names prop_names;
+ 	struct property_entry ep_properties[5];
+ 	struct property_entry dev_properties[3];
+-- 
+2.30.1
+
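With this change, the sensor's software node carries "rotation" (in degrees) and "orientation" (a v4l2_fwnode_orientation value) as u32 device properties. Sensor drivers normally consume them through v4l2_fwnode_device_parse(), as the following patch does for ov5693; the sketch below only illustrates reading them back directly, for example while debugging.

#include <linux/device.h>
#include <linux/property.h>

/*
 * Illustrative only: read back the properties registered by cio2-bridge
 * from the sensor's fwnode. Drivers normally rely on
 * v4l2_fwnode_device_parse() instead (see the next patch).
 */
static void example_dump_camera_props(struct device *dev)
{
	u32 rotation = 0, orientation = 0;

	fwnode_property_read_u32(dev_fwnode(dev), "rotation", &rotation);
	fwnode_property_read_u32(dev_fwnode(dev), "orientation",
				 &orientation);

	dev_info(dev, "rotation=%u orientation=%u\n", rotation, orientation);
}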
+From 89fb5c747609d2c10c77f6595638883a5700e395 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fabian=20W=C3=BCthrich?= <me@fabwu.ch>
+Date: Fri, 22 Jan 2021 21:23:47 +0100
+Subject: [PATCH] ov5693: Add orientation and rotation controls
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Parse orientation and rotation from fwnodes and initialize the
+respective controls.
+
+Signed-off-by: Fabian Wüthrich <me@fabwu.ch>
+Patchset: cameras
+---
+ drivers/media/i2c/ov5693.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
+index 9fd44a3d1d85..1a85800df7ed 100644
+--- a/drivers/media/i2c/ov5693.c
++++ b/drivers/media/i2c/ov5693.c
+@@ -31,6 +31,7 @@
+ #include <linux/i2c.h>
+ #include <linux/moduleparam.h>
+ #include <media/v4l2-device.h>
++#include <media/v4l2-fwnode.h>
+ #include <linux/io.h>
+ #include <linux/acpi.h>
+ #include <linux/regulator/consumer.h>
+@@ -1608,6 +1609,7 @@ static int ov5693_init_controls(struct ov5693_device *ov5693)
+ {
+ 	struct i2c_client *client = v4l2_get_subdevdata(&ov5693->sd);
+ 	const struct v4l2_ctrl_ops *ops = &ov5693_ctrl_ops;
++	struct v4l2_fwnode_device_properties props;
+ 	struct v4l2_ctrl *ctrl;
+ 	unsigned int i;
+ 	int ret;
+@@ -1663,6 +1665,15 @@ static int ov5693_init_controls(struct ov5693_device *ov5693)
+ 	if (ov5693->hblank)
+ 		ov5693->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ 
++	/* set properties from fwnode (e.g. rotation, orientation) */
++	ret = v4l2_fwnode_device_parse(&client->dev, &props);
++	if (ret)
++		return ret;
++
++	ret = v4l2_ctrl_new_fwnode_properties(&ov5693->ctrl_handler, ops, &props);
++	if (ret)
++		return ret;
++
+ 	/* Use same lock for controls as for everything else. */
+ 	ov5693->ctrl_handler.lock = &ov5693->input_lock;
+ 	ov5693->sd.ctrl_handler = &ov5693->ctrl_handler;
+-- 
+2.30.1
+
+From 2ba5f312ff72c2744848983a4098177cbf054081 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 23 Jan 2021 00:28:32 +0000
+Subject: [PATCH] platform: x86: Stylistic updates for intel-skl-int3472
+
+This commit makes a number of stylistic updates and minor changes as
+part of an improvements pass over the code, after taking into account
+feedback from the list.
+
+It also alters the ACPI buffer fetching code to be more generalisable so
+I can re-use it to fetch the clock frequency.
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ .../platform/x86/intel_skl_int3472_common.c   |  37 ++++---
+ .../platform/x86/intel_skl_int3472_common.h   |   7 +-
+ .../platform/x86/intel_skl_int3472_discrete.c | 101 +++++++++---------
+ .../platform/x86/intel_skl_int3472_tps68470.c |  16 +--
+ 4 files changed, 89 insertions(+), 72 deletions(-)
+
+diff --git a/drivers/platform/x86/intel_skl_int3472_common.c b/drivers/platform/x86/intel_skl_int3472_common.c
+index 08cb9d3c06aa..549d211979e1 100644
+--- a/drivers/platform/x86/intel_skl_int3472_common.c
++++ b/drivers/platform/x86/intel_skl_int3472_common.c
+@@ -7,41 +7,52 @@
+ 
+ #include "intel_skl_int3472_common.h"
+ 
+-int skl_int3472_get_cldb_buffer(struct acpi_device *adev,
+-				struct int3472_cldb *cldb)
++union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
++					       char *id)
+ {
+ 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ 	acpi_handle handle = adev->handle;
+ 	union acpi_object *obj;
+ 	acpi_status status;
+-	int ret = 0;
+ 
+-	status = acpi_evaluate_object(handle, "CLDB", NULL, &buffer);
++	status = acpi_evaluate_object(handle, id, NULL, &buffer);
+ 	if (ACPI_FAILURE(status))
+-		return -ENODEV;
++		return ERR_PTR(-ENODEV);
+ 
+ 	obj = buffer.pointer;
+ 	if (!obj) {
+-		dev_err(&adev->dev, "ACPI device has no CLDB object\n");
+-		return -ENODEV;
++		dev_err(&adev->dev, "ACPI device has no %s object\n", id);
++		return ERR_PTR(-ENODEV);
+ 	}
+ 
+ 	if (obj->type != ACPI_TYPE_BUFFER) {
+-		dev_err(&adev->dev, "CLDB object is not an ACPI buffer\n");
+-		ret = -EINVAL;
+-		goto out_free_buff;
++		dev_err(&adev->dev, "%s object is not an ACPI buffer\n", id);
++		kfree(obj);
++		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	return obj;
++}
++
++int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
++{
++	union acpi_object *obj;
++	int ret = 0;
++
++	obj = skl_int3472_get_acpi_buffer(adev, "CLDB");
++	if (IS_ERR(obj))
++		return PTR_ERR(obj);
++
+ 	if (obj->buffer.length > sizeof(*cldb)) {
+ 		dev_err(&adev->dev, "The CLDB buffer is too large\n");
+ 		ret = -EINVAL;
+-		goto out_free_buff;
++		goto out_free_obj;
+ 	}
+ 
+ 	memcpy(cldb, obj->buffer.pointer, obj->buffer.length);
+ 
+-out_free_buff:
+-	kfree(buffer.pointer);
++out_free_obj:
++	kfree(obj);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/platform/x86/intel_skl_int3472_common.h b/drivers/platform/x86/intel_skl_int3472_common.h
+index 4ac6bb2b223f..e1083bb67dc6 100644
+--- a/drivers/platform/x86/intel_skl_int3472_common.h
++++ b/drivers/platform/x86/intel_skl_int3472_common.h
+@@ -29,7 +29,7 @@
+ 
+ #define INT3472_GPIO_FUNCTION_REMAP(_PIN, _FUNCTION)		\
+ 	(const struct int3472_gpio_function_remap) {		\
+-		.documented = _PIN,					\
++		.documented = _PIN,				\
+ 		.actual = _FUNCTION				\
+ 	}
+ 
+@@ -95,5 +95,6 @@ struct int3472_sensor_config {
+ int skl_int3472_discrete_probe(struct platform_device *pdev);
+ int skl_int3472_discrete_remove(struct platform_device *pdev);
+ int skl_int3472_tps68470_probe(struct i2c_client *client);
+-int skl_int3472_get_cldb_buffer(struct acpi_device *adev,
+-				struct int3472_cldb *cldb);
++union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
++					       char *id);
++int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
+diff --git a/drivers/platform/x86/intel_skl_int3472_discrete.c b/drivers/platform/x86/intel_skl_int3472_discrete.c
+index ea7e57f3e3f0..42ae8396eb64 100644
+--- a/drivers/platform/x86/intel_skl_int3472_discrete.c
++++ b/drivers/platform/x86/intel_skl_int3472_discrete.c
+@@ -12,12 +12,12 @@
+ 
+ #include "intel_skl_int3472_common.h"
+ 
+-/* 79234640-9e10-4fea-a5c1b5aa8b19756f */
++/* 79234640-9e10-4fea-a5c1-b5aa8b19756f */
+ static const guid_t int3472_gpio_guid =
+ 	GUID_INIT(0x79234640, 0x9e10, 0x4fea,
+ 		  0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f);
+ 
+-/* 822ace8f-2814-4174-a56b5f029fe079ee */
++/* 822ace8f-2814-4174-a56b-5f029fe079ee */
+ static const guid_t cio2_sensor_module_guid =
+ 	GUID_INIT(0x822ace8f, 0x2814, 0x4174,
+ 		  0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee);
+@@ -94,7 +94,7 @@ static const struct clk_ops skl_int3472_clock_ops = {
+ };
+ 
+ static struct int3472_sensor_config *
+-int3472_get_sensor_module_config(struct int3472_device *int3472)
++skl_int3472_get_sensor_module_config(struct int3472_device *int3472)
+ {
+ 	unsigned int i = ARRAY_SIZE(int3472_sensor_configs);
+ 	struct int3472_sensor_config *ret;
+@@ -131,9 +131,9 @@ int3472_get_sensor_module_config(struct int3472_device *int3472)
+ 	return ret;
+ }
+ 
+-static int int3472_map_gpio_to_sensor(struct int3472_device *int3472,
+-				      struct acpi_resource *ares,
+-				      char *func, u32 polarity)
++static int skl_int3472_map_gpio_to_sensor(struct int3472_device *int3472,
++					  struct acpi_resource *ares,
++					  char *func, u32 polarity)
+ {
+ 	char *path = ares->data.gpio.resource_source.string_ptr;
+ 	struct int3472_sensor_config *sensor_config;
+@@ -143,7 +143,7 @@ static int int3472_map_gpio_to_sensor(struct int3472_device *int3472,
+ 	acpi_status status;
+ 	int ret;
+ 
+-	sensor_config = int3472_get_sensor_module_config(int3472);
++	sensor_config = skl_int3472_get_sensor_module_config(int3472);
+ 	if (!IS_ERR(sensor_config) && sensor_config->function_maps) {
+ 		unsigned int i = 0;
+ 
+@@ -186,17 +186,19 @@ static int int3472_map_gpio_to_sensor(struct int3472_device *int3472,
+ 	return 0;
+ }
+ 
+-static int int3472_register_clock(struct int3472_device *int3472,
+-				  struct acpi_resource *ares)
++static int skl_int3472_register_clock(struct int3472_device *int3472,
++				      struct acpi_resource *ares)
+ {
+ 	char *path = ares->data.gpio.resource_source.string_ptr;
+-	struct clk_init_data init = { };
++	struct clk_init_data init = { 0 };
+ 	int ret = 0;
+ 
+-	init.name = kasprintf(GFP_KERNEL, "%s-clk", acpi_dev_name(int3472->adev));
++	init.name = kasprintf(GFP_KERNEL, "%s-clk",
++			      acpi_dev_name(int3472->adev));
+ 	init.ops = &skl_int3472_clock_ops;
+ 
+-	int3472->clock.gpio = acpi_get_gpiod(path, ares->data.gpio.pin_table[0]);
++	int3472->clock.gpio = acpi_get_gpiod(path,
++					     ares->data.gpio.pin_table[0]);
+ 	if (IS_ERR(int3472->clock.gpio)) {
+ 		ret = PTR_ERR(int3472->clock.gpio);
+ 		goto out_free_init_name;
+@@ -226,17 +228,16 @@ static int int3472_register_clock(struct int3472_device *int3472,
+ 	return ret;
+ }
+ 
+-static int int3472_register_regulator(struct int3472_device *int3472,
+-				      struct acpi_resource *ares)
++static int skl_int3472_register_regulator(struct int3472_device *int3472,
++					  struct acpi_resource *ares)
+ {
+ 	char *path = ares->data.gpio.resource_source.string_ptr;
+ 	struct int3472_sensor_config *sensor_config;
+ 	struct regulator_init_data init_data = { };
+-	struct int3472_gpio_regulator *regulator;
+ 	struct regulator_config cfg = { };
+ 	int ret;
+ 
+-	sensor_config = int3472_get_sensor_module_config(int3472);
++	sensor_config = skl_int3472_get_sensor_module_config(int3472);
+ 	if (IS_ERR_OR_NULL(sensor_config)) {
+ 		dev_err(&int3472->pdev->dev, "No sensor module config\n");
+ 		return PTR_ERR(sensor_config);
+@@ -252,26 +253,29 @@ static int int3472_register_regulator(struct int3472_device *int3472,
+ 	init_data.num_consumer_supplies = 1;
+ 	init_data.consumer_supplies = &sensor_config->supply_map;
+ 
+-	snprintf(int3472->regulator.regulator_name, GPIO_REGULATOR_NAME_LENGTH,
+-		 "int3472-discrete-regulator");
+-	snprintf(int3472->regulator.supply_name, GPIO_REGULATOR_SUPPLY_NAME_LENGTH,
+-		 "supply-0");
++	snprintf(int3472->regulator.regulator_name,
++		 GPIO_REGULATOR_NAME_LENGTH, "int3472-discrete-regulator");
++	snprintf(int3472->regulator.supply_name,
++		 GPIO_REGULATOR_SUPPLY_NAME_LENGTH, "supply-0");
+ 
+-	int3472->regulator.rdesc = INT3472_REGULATOR(int3472->regulator.regulator_name,
+-						     int3472->regulator.supply_name,
+-						     &int3472_gpio_regulator_ops);
++	int3472->regulator.rdesc = INT3472_REGULATOR(
++						int3472->regulator.regulator_name,
++						int3472->regulator.supply_name,
++						&int3472_gpio_regulator_ops);
+ 
+-	int3472->regulator.gpio = acpi_get_gpiod(path, ares->data.gpio.pin_table[0]);
++	int3472->regulator.gpio = acpi_get_gpiod(path,
++						 ares->data.gpio.pin_table[0]);
+ 	if (IS_ERR(int3472->regulator.gpio)) {
+-		ret = PTR_ERR(int3472->regulator.gpio);
+-		goto err_free_regulator;
++		dev_err(&int3472->pdev->dev, "Failed to get GPIO line\n");
++		return PTR_ERR(int3472->regulator.gpio);
+ 	}
+ 
+ 	cfg.dev = &int3472->adev->dev;
+ 	cfg.init_data = &init_data;
+ 	cfg.ena_gpiod = int3472->regulator.gpio;
+ 
+-	int3472->regulator.rdev = regulator_register(&int3472->regulator.rdesc, &cfg);
++	int3472->regulator.rdev = regulator_register(&int3472->regulator.rdesc,
++						     &cfg);
+ 	if (IS_ERR(int3472->regulator.rdev)) {
+ 		ret = PTR_ERR(int3472->regulator.rdev);
+ 		goto err_free_gpio;
+@@ -280,15 +284,13 @@ static int int3472_register_regulator(struct int3472_device *int3472,
+ 	return 0;
+ 
+ err_free_gpio:
+-	gpiod_put(regulator->gpio);
+-err_free_regulator:
+-	kfree(regulator);
++	gpiod_put(int3472->regulator.gpio);
+ 
+ 	return ret;
+ }
+ 
+ /**
+- * int3472_handle_gpio_resources: maps PMIC resources to consuming sensor
++ * skl_int3472_handle_gpio_resources: maps PMIC resources to consuming sensor
+  * @ares: A pointer to a &struct acpi_resource
+  * @data: A pointer to a &struct int3472_device
+  *
+@@ -305,8 +307,9 @@ static int int3472_register_regulator(struct int3472_device *int3472,
+  *
+  * There are some known platform specific quirks where that does not quite
+  * hold up; for example where a pin with type 0x01 (Power down) is mapped to
+- * a sensor pin that performs a reset function. These will be handled by the
+- * mapping sub-functions.
++ * a sensor pin that performs a reset function or entries in _CRS and _DSM that
++ * do not actually correspond to a physical connection. These will be handled by
++ * the mapping sub-functions.
+  *
+  * GPIOs will either be mapped directly to the sensor device or else used
+  * to create clocks and regulators via the usual frameworks.
+@@ -317,8 +320,8 @@ static int int3472_register_regulator(struct int3472_device *int3472,
+  * * -ENODEV	- If the resource has no corresponding _DSM entry
+  * * -Other	- Errors propagated from one of the sub-functions.
+  */
+-static int int3472_handle_gpio_resources(struct acpi_resource *ares,
+-					 void *data)
++static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
++					     void *data)
+ {
+ 	struct int3472_device *int3472 = data;
+ 	union acpi_object *obj;
+@@ -345,30 +348,30 @@ static int int3472_handle_gpio_resources(struct acpi_resource *ares,
+ 
+ 	switch (obj->integer.value & 0xff) {
+ 	case INT3472_GPIO_TYPE_RESET:
+-		ret = int3472_map_gpio_to_sensor(int3472, ares, "reset",
+-						 GPIO_ACTIVE_LOW);
++		ret = skl_int3472_map_gpio_to_sensor(int3472, ares, "reset",
++						     GPIO_ACTIVE_LOW);
+ 		if (ret)
+ 			dev_err(&int3472->pdev->dev,
+ 				"Failed to map reset pin to sensor\n");
+ 
+ 		break;
+ 	case INT3472_GPIO_TYPE_POWERDOWN:
+-		ret = int3472_map_gpio_to_sensor(int3472, ares, "powerdown",
+-						 GPIO_ACTIVE_LOW);
++		ret = skl_int3472_map_gpio_to_sensor(int3472, ares, "powerdown",
++						     GPIO_ACTIVE_LOW);
+ 		if (ret)
+ 			dev_err(&int3472->pdev->dev,
+ 				"Failed to map powerdown pin to sensor\n");
+ 
+ 		break;
+ 	case INT3472_GPIO_TYPE_CLK_ENABLE:
+-		ret = int3472_register_clock(int3472, ares);
++		ret = skl_int3472_register_clock(int3472, ares);
+ 		if (ret)
+ 			dev_err(&int3472->pdev->dev,
+ 				"Failed to map clock to sensor\n");
+ 
+ 		break;
+ 	case INT3472_GPIO_TYPE_POWER_ENABLE:
+-		ret = int3472_register_regulator(int3472, ares);
++		ret = skl_int3472_register_regulator(int3472, ares);
+ 		if (ret) {
+ 			dev_err(&int3472->pdev->dev,
+ 				"Failed to map regulator to sensor\n");
+@@ -376,8 +379,9 @@ static int int3472_handle_gpio_resources(struct acpi_resource *ares,
+ 
+ 		break;
+ 	case INT3472_GPIO_TYPE_PRIVACY_LED:
+-		ret = int3472_map_gpio_to_sensor(int3472, ares, "indicator-led",
+-						 GPIO_ACTIVE_HIGH);
++		ret = skl_int3472_map_gpio_to_sensor(int3472, ares,
++						     "indicator-led",
++						     GPIO_ACTIVE_HIGH);
+ 		if (ret)
+ 			dev_err(&int3472->pdev->dev,
+ 				"Failed to map indicator led to sensor\n");
+@@ -396,7 +400,7 @@ static int int3472_handle_gpio_resources(struct acpi_resource *ares,
+ 	return ret;
+ }
+ 
+-static int int3472_parse_crs(struct int3472_device *int3472)
++static int skl_int3472_parse_crs(struct int3472_device *int3472)
+ {
+ 	struct list_head resource_list;
+ 	int ret = 0;
+@@ -404,7 +408,8 @@ static int int3472_parse_crs(struct int3472_device *int3472)
+ 	INIT_LIST_HEAD(&resource_list);
+ 
+ 	ret = acpi_dev_get_resources(int3472->adev, &resource_list,
+-				     int3472_handle_gpio_resources, int3472);
++				     skl_int3472_handle_gpio_resources,
++				     int3472);
+ 
+ 	if (!ret) {
+ 		gpiod_add_lookup_table(&int3472->gpios);
+@@ -423,7 +428,7 @@ int skl_int3472_discrete_probe(struct platform_device *pdev)
+ 	struct int3472_cldb cldb;
+ 	int ret = 0;
+ 
+-	ret = skl_int3472_get_cldb_buffer(adev, &cldb);
++	ret = skl_int3472_fill_cldb(adev, &cldb);
+ 	if (ret || cldb.control_logic_type != 1)
+ 		return -EINVAL;
+ 
+@@ -444,10 +449,10 @@ int skl_int3472_discrete_probe(struct platform_device *pdev)
+ 		ret = -ENODEV;
+ 		goto err_free_int3472;
+ 	}
+-	int3472->sensor_name = i2c_acpi_dev_name(int3472->sensor);
++	int3472->sensor_name = kasprintf(GFP_KERNEL, I2C_DEV_NAME_FORMAT, acpi_dev_name(int3472->sensor));
+ 	int3472->gpios.dev_id = int3472->sensor_name;
+ 
+-	ret = int3472_parse_crs(int3472);
++	ret = skl_int3472_parse_crs(int3472);
+ 	if (ret) {
+ 		skl_int3472_discrete_remove(pdev);
+ 		goto err_return_ret;
+diff --git a/drivers/platform/x86/intel_skl_int3472_tps68470.c b/drivers/platform/x86/intel_skl_int3472_tps68470.c
+index 3fe27ec0caff..40629291b339 100644
+--- a/drivers/platform/x86/intel_skl_int3472_tps68470.c
++++ b/drivers/platform/x86/intel_skl_int3472_tps68470.c
+@@ -87,20 +87,20 @@ int skl_int3472_tps68470_probe(struct i2c_client *client)
+ 
+ 	/*
+ 	 * Check CLDB buffer against the PMIC's adev. If present, then we check
+-	 * the value of control_logic_type field and follow one of the following
+-	 * scenarios:
++	 * the value of control_logic_type field and follow one of the
++	 * following scenarios:
+ 	 *
+-	 *	1. No CLDB - likely ACPI tables designed for ChromeOS. We create
+-	 *	platform devices for the GPIOs and OpRegion drivers.
++	 *	1. No CLDB - likely ACPI tables designed for ChromeOS. We
++	 *	create platform devices for the GPIOs and OpRegion drivers.
+ 	 *
+-	 *	2. CLDB, with control_logic_type = 2 - probably ACPI tables made
+-	 *	for Windows 2-in-1 platforms. Register pdevs for GPIO, Clock and
+-	 *	Regulator drivers to bind to.
++	 *	2. CLDB, with control_logic_type = 2 - probably ACPI tables
++	 *	made for Windows 2-in-1 platforms. Register pdevs for GPIO,
++	 *	Clock and Regulator drivers to bind to.
+ 	 *
+ 	 *	3. Any other value in control_logic_type, we should never have
+ 	 *	gotten to this point; crash and burn.
+ 	 */
+-	ret = skl_int3472_get_cldb_buffer(adev, &cldb);
++	ret = skl_int3472_fill_cldb(adev, &cldb);
+ 	if (!ret && cldb.control_logic_type != 2)
+ 		return -EINVAL;
+ 
+-- 
+2.30.1
+
+From 75b63fc7f75d662d516d8120c3a870bd781e14c7 Mon Sep 17 00:00:00 2001
+From: Daniel Scally <djrscally@gmail.com>
+Date: Sat, 23 Jan 2021 00:30:15 +0000
+Subject: [PATCH] platform: x86: Add recalc_rate op to int3472-discrete clock
+
+This commit adds the recalc_rate op to the clock registered by
+int3472-discrete so that sensor drivers calling clk_get_rate() receive a
+valid value.
+
+The value is simply read from the sensor's SSDB buffer, and so we pass
+CLK_GET_RATE_NOCACHE.
+
+Signed-off-by: Daniel Scally <djrscally@gmail.com>
+Patchset: cameras
+---
+ .../platform/x86/intel_skl_int3472_common.h   |  6 +++
+ .../platform/x86/intel_skl_int3472_discrete.c | 37 ++++++++++++++++++-
+ 2 files changed, 41 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/intel_skl_int3472_common.h b/drivers/platform/x86/intel_skl_int3472_common.h
+index e1083bb67dc6..860c849b7769 100644
+--- a/drivers/platform/x86/intel_skl_int3472_common.h
++++ b/drivers/platform/x86/intel_skl_int3472_common.h
+@@ -17,6 +17,8 @@
+ #define GPIO_REGULATOR_NAME_LENGTH				27
+ #define GPIO_REGULATOR_SUPPLY_NAME_LENGTH			9
+ 
++#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET			86
++
+ #define INT3472_REGULATOR(_NAME, _SUPPLY, _OPS)			\
+ 	(const struct regulator_desc) {				\
+ 		.name = _NAME,					\
+@@ -36,6 +38,9 @@
+ #define to_int3472_clk(hw)					\
+ 	container_of(hw, struct int3472_gpio_clock, clk_hw)
+ 
++#define to_int3472_device(clk)					\
++	container_of(clk, struct int3472_device, clock)
++
+ struct int3472_cldb {
+ 	u8 version;
+ 	/*
+@@ -62,6 +67,7 @@ struct int3472_gpio_regulator {
+ struct int3472_gpio_clock {
+ 	struct clk *clk;
+ 	struct clk_hw clk_hw;
++	struct clk_lookup *cl;
+ 	struct gpio_desc *gpio;
+ };
+ 
+diff --git a/drivers/platform/x86/intel_skl_int3472_discrete.c b/drivers/platform/x86/intel_skl_int3472_discrete.c
+index 42ae8396eb64..98eb1ec3399e 100644
+--- a/drivers/platform/x86/intel_skl_int3472_discrete.c
++++ b/drivers/platform/x86/intel_skl_int3472_discrete.c
+@@ -86,11 +86,41 @@ static void skl_int3472_clk_unprepare(struct clk_hw *hw)
+ 	/* Likewise, nothing to do here... */
+ }
+ 
++static unsigned int skl_int3472_get_clk_frequency(struct int3472_device *int3472)
++{
++	union acpi_object *obj;
++	unsigned int ret = 0;
++
++	obj = skl_int3472_get_acpi_buffer(int3472->sensor, "SSDB");
++	if (IS_ERR(obj))
++		goto out_free_buff; /* report rate as 0 on error */
++
++	if (obj->buffer.length < CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET + sizeof(u32)) {
++		dev_err(&int3472->pdev->dev, "The buffer is too small\n");
++		goto out_free_buff;
++	}
++
++	ret = *(u32*)(obj->buffer.pointer + CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET);
++
++out_free_buff:
++	kfree(obj);
++	return ret;
++}
++
++static unsigned long skl_int3472_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
++{
++	struct int3472_gpio_clock *clk = to_int3472_clk(hw);
++	struct int3472_device *int3472 = to_int3472_device(clk);
++
++	return skl_int3472_get_clk_frequency(int3472);
++}
++
+ static const struct clk_ops skl_int3472_clock_ops = {
+ 	.prepare = skl_int3472_clk_prepare,
+ 	.unprepare = skl_int3472_clk_unprepare,
+ 	.enable = skl_int3472_clk_enable,
+ 	.disable = skl_int3472_clk_disable,
++	.recalc_rate = skl_int3472_clk_recalc_rate,
+ };
+ 
+ static struct int3472_sensor_config *
+@@ -196,6 +226,7 @@ static int skl_int3472_register_clock(struct int3472_device *int3472,
+ 	init.name = kasprintf(GFP_KERNEL, "%s-clk",
+ 			      acpi_dev_name(int3472->adev));
+ 	init.ops = &skl_int3472_clock_ops;
++	init.flags |= CLK_GET_RATE_NOCACHE;
+ 
+ 	int3472->clock.gpio = acpi_get_gpiod(path,
+ 					     ares->data.gpio.pin_table[0]);
+@@ -212,8 +243,9 @@ static int skl_int3472_register_clock(struct int3472_device *int3472,
+ 		goto err_put_gpio;
+ 	}
+ 
+-	ret = clk_register_clkdev(int3472->clock.clk, "xvclk", int3472->sensor_name);
+-	if (ret)
++	int3472->clock.cl = clkdev_create(int3472->clock.clk, "xvclk",
++					  int3472->sensor_name);
++	if (IS_ERR_OR_NULL(int3472->clock.cl))
+ 		goto err_unregister_clk;
+ 
+ 	goto out_free_init_name;
+@@ -483,6 +515,7 @@ int skl_int3472_discrete_remove(struct platform_device *pdev)
+ 	if (!IS_ERR_OR_NULL(int3472->clock.clk)) {
+ 		gpiod_put(int3472->clock.gpio);
+ 		clk_unregister(int3472->clock.clk);
++		clkdev_drop(int3472->clock.cl);
+ 	}
+ 
+ 	acpi_dev_put(int3472->sensor);
+-- 
+2.30.1
+
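For context, a minimal sketch of how a sensor driver might observe the change above; it is not part of the patch set, the function name is hypothetical, and only the "xvclk" con_id and the recalc_rate behaviour are taken from the patch itself.

/*
 * Illustrative sketch: consuming the "xvclk" lookup registered by
 * skl_int3472_register_clock(). With the recalc_rate op in place,
 * clk_get_rate() returns the MCLK speed read from the sensor's SSDB
 * buffer instead of 0.
 */
#include <linux/clk.h>
#include <linux/i2c.h>

static int example_sensor_clk_probe(struct i2c_client *client)
{
	struct clk *xvclk;
	unsigned long rate;
	int ret;

	xvclk = devm_clk_get(&client->dev, "xvclk");
	if (IS_ERR(xvclk))
		return PTR_ERR(xvclk);

	ret = clk_prepare_enable(xvclk);
	if (ret)
		return ret;

	rate = clk_get_rate(xvclk);
	dev_info(&client->dev, "xvclk rate: %lu Hz\n", rate);

	clk_disable_unprepare(xvclk);
	return 0;
}

Because the clock is registered with CLK_GET_RATE_NOCACHE, the clock framework re-invokes recalc_rate on every query rather than caching the value.
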
+From 0b11240196604cd0bd721b4a46180fe7823ee06b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fabian=20W=C3=BCthrich?= <me@fabwu.ch>
+Date: Sun, 24 Jan 2021 11:07:42 +0100
+Subject: [PATCH] cio2-bridge: Use macros and add warnings
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use macros for the _PLD panel as defined in the ACPI spec 6.3 and emit
+a warning if we see an unknown value.
+
+Signed-off-by: Fabian Wüthrich <me@fabwu.ch>
+Patchset: cameras
+---
+ drivers/media/pci/intel/ipu3/cio2-bridge.c | 33 ++++++++++++++++------
+ drivers/media/pci/intel/ipu3/cio2-bridge.h | 13 +++++++++
+ 2 files changed, 37 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+index 806d4e5fc177..3c373ad1c0b0 100644
+--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
++++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
+@@ -73,21 +73,36 @@ static int cio2_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
+ 	return ret;
+ }
+ 
+-static u32 cio2_bridge_parse_rotation(u8 rotation)
++static u32 cio2_bridge_parse_rotation(struct cio2_sensor *sensor)
+ {
+-	if (rotation == 1)
++	switch (sensor->ssdb.degree) {
++	case CIO2_SENSOR_ROTATION_NORMAL:
++		return 0;
++	case CIO2_SENSOR_ROTATION_INVERTED:
+ 		return 180;
+-	return 0;
++	default:
++		dev_warn(&sensor->adev->dev,
++			 "Unknown rotation %d. Assume 0 degree rotation\n",
++			 sensor->ssdb.degree);
++		return 0;
++	}
+ }
+ 
+-static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(u8 panel)
++static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(struct cio2_sensor *sensor)
+ {
+-	switch (panel) {
+-	case 4:
++	switch (sensor->pld->panel) {
++	case CIO2_PLD_PANEL_FRONT:
+ 		return V4L2_FWNODE_ORIENTATION_FRONT;
+-	case 5:
++	case CIO2_PLD_PANEL_BACK:
+ 		return V4L2_FWNODE_ORIENTATION_BACK;
++	case CIO2_PLD_PANEL_TOP:
++	case CIO2_PLD_PANEL_LEFT:
++	case CIO2_PLD_PANEL_RIGHT:
++	case CIO2_PLD_PANEL_UNKNOWN:
++		return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ 	default:
++		dev_warn(&sensor->adev->dev, "Unknown _PLD panel value %d\n",
++			 sensor->pld->panel);
+ 		return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ 	}
+ }
+@@ -100,8 +115,8 @@ static void cio2_bridge_create_fwnode_properties(
+ 	u32 rotation;
+ 	enum v4l2_fwnode_orientation orientation;
+ 
+-	rotation = cio2_bridge_parse_rotation(sensor->ssdb.degree);
+-	orientation = cio2_bridge_parse_orientation(sensor->pld->panel);
++	rotation = cio2_bridge_parse_rotation(sensor);
++	orientation = cio2_bridge_parse_orientation(sensor);
+ 
+ 	sensor->prop_names = prop_names;
+ 
+diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
+index 924d99d20328..e1e388cc9f45 100644
+--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
++++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
+@@ -12,6 +12,19 @@
+ #define CIO2_MAX_LANES				4
+ #define MAX_NUM_LINK_FREQS			3
+ 
++/* Values are estimated guesses as we don't have a spec */
++#define CIO2_SENSOR_ROTATION_NORMAL		0
++#define CIO2_SENSOR_ROTATION_INVERTED		1
++
++/* Panel position defined in _PLD section of ACPI Specification 6.3 */
++#define CIO2_PLD_PANEL_TOP			0
++#define CIO2_PLD_PANEL_BOTTOM			1
++#define CIO2_PLD_PANEL_LEFT			2
++#define CIO2_PLD_PANEL_RIGHT			3
++#define CIO2_PLD_PANEL_FRONT			4
++#define CIO2_PLD_PANEL_BACK			5
++#define CIO2_PLD_PANEL_UNKNOWN			6
++
+ #define CIO2_SENSOR_CONFIG(_HID, _NR, ...)	\
+ 	(const struct cio2_sensor_config) {	\
+ 		.hid = _HID,			\
+-- 
+2.30.1
+
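
The CIO2_PLD_PANEL_* values follow the Panel field encoding of the _PLD object in ACPI 6.3 (0 = top, 1 = bottom, 2 = left, 3 = right, 4 = front, 5 = back, 6 = unknown); only front and back map to specific V4L2 orientations, while the remaining values fall back to V4L2_FWNODE_ORIENTATION_EXTERNAL. As a rough sketch, not part of the patch set and with a hypothetical helper name, the panel field for an ACPI camera device can be read through acpi_get_physical_device_location() and compared against these macros:

/*
 * Illustrative sketch: fetch the _PLD descriptor for an ACPI device and
 * return its panel field for comparison with the CIO2_PLD_PANEL_* macros.
 */
#include <linux/acpi.h>

static int example_read_pld_panel(struct acpi_device *adev, u8 *panel)
{
	struct acpi_pld_info *pld;

	if (!acpi_get_physical_device_location(adev->handle, &pld))
		return -ENODEV;

	*panel = pld->panel;	/* e.g. CIO2_PLD_PANEL_FRONT (4) */
	ACPI_FREE(pld);
	return 0;
}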