Browse Source

Add patches for v5.9

Links:
- SAM: https://github.com/linux-surface/surface-aggregator-module/commit/af4bb01042d8ab707d8a73d4ee7ff770223a1c2f
- IPTS: https://github.com/linux-surface/intel-precise-touch/commit/0a4a44c2a9b676bd25d1cd916118dcfe3f447849
- kernel: https://github.com/linux-surface/kernel/commit/d4c72c439f9a34ebda74fafe1be22795154ae366
Maximilian Luz 4 years ago
parent
commit
ea2ecf7a1d

+ 101 - 0
patches/5.9/0001-surface3-oemb.patch

@@ -0,0 +1,101 @@
+From 98d612006b94231362c3b5f16deb4a8e9a4ea4d0 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 18 Oct 2020 16:42:44 +0900
+Subject: [PATCH] (surface3-oemb) add DMI matches for Surface 3 with broken DMI
+ table
+
+On some Surface 3, the DMI table gets corrupted for unknown reasons
+and breaks existing DMI matching used for device-specific quirks.
+
+This commit adds the (broken) DMI data into dmi_system_id tables used
+for quirks so that each driver can enable quirks even on the affected
+systems.
+
+On affected systems, DMI data will look like this:
+    $ grep . /sys/devices/virtual/dmi/id/{bios_vendor,board_name,board_vendor,\
+    chassis_vendor,product_name,sys_vendor}
+    /sys/devices/virtual/dmi/id/bios_vendor:American Megatrends Inc.
+    /sys/devices/virtual/dmi/id/board_name:OEMB
+    /sys/devices/virtual/dmi/id/board_vendor:OEMB
+    /sys/devices/virtual/dmi/id/chassis_vendor:OEMB
+    /sys/devices/virtual/dmi/id/product_name:OEMB
+    /sys/devices/virtual/dmi/id/sys_vendor:OEMB
+
+Expected:
+    $ grep . /sys/devices/virtual/dmi/id/{bios_vendor,board_name,board_vendor,\
+    chassis_vendor,product_name,sys_vendor}
+    /sys/devices/virtual/dmi/id/bios_vendor:American Megatrends Inc.
+    /sys/devices/virtual/dmi/id/board_name:Surface 3
+    /sys/devices/virtual/dmi/id/board_vendor:Microsoft Corporation
+    /sys/devices/virtual/dmi/id/chassis_vendor:Microsoft Corporation
+    /sys/devices/virtual/dmi/id/product_name:Surface 3
+    /sys/devices/virtual/dmi/id/sys_vendor:Microsoft Corporation
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: surface3-oemb
+---
+ drivers/platform/x86/surface3-wmi.c               | 7 +++++++
+ sound/soc/codecs/rt5645.c                         | 9 +++++++++
+ sound/soc/intel/common/soc-acpi-intel-cht-match.c | 8 ++++++++
+ 3 files changed, 24 insertions(+)
+
+diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
+index 130b6f52a600..801083aa56d6 100644
+--- a/drivers/platform/x86/surface3-wmi.c
++++ b/drivers/platform/x86/surface3-wmi.c
+@@ -37,6 +37,13 @@ static const struct dmi_system_id surface3_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 420003d062c7..217e488cd4fa 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3687,6 +3687,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ 		},
+ 		.driver_data = (void *)&intel_braswell_platform_data,
+ 	},
++	{
++		.ident = "Microsoft Surface 3",
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++		.driver_data = (void *)&intel_braswell_platform_data,
++	},
+ 	{
+ 		/*
+ 		 * Match for the GPDwin which unfortunately uses somewhat
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index 2752dc955733..ef36a316e2ed 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -27,6 +27,14 @@ static const struct dmi_system_id cht_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+ 	},
++	{
++		.callback = cht_surface_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++	},
+ 	{ }
+ };
+ 
+-- 
+2.28.0
+

+ 1376 - 0
patches/5.9/0002-wifi.patch

@@ -0,0 +1,1376 @@
+From 1040020890118b08d89401905c6c3c63f0127ccf Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Thu, 24 Sep 2020 18:02:06 +0900
+Subject: [PATCH] mwifiex: pcie: skip cancel_work_sync() on reset failure path
+
+If a reset is performed, but even the reset fails for some reason (e.g.,
+on Surface devices, the fw reset requires another quirk),
+cancel_work_sync() hangs in mwifiex_cleanup_pcie().
+
+    # reset performed after firmware went into bad state
+    kernel: mwifiex_pcie 0000:01:00.0: WLAN FW already running! Skip FW dnld
+    kernel: mwifiex_pcie 0000:01:00.0: WLAN FW is active
+    # but even the reset failed
+    kernel: mwifiex_pcie 0000:01:00.0: mwifiex_cmd_timeout_func: Timeout cmd id = 0xfa, act = 0xa000
+    kernel: mwifiex_pcie 0000:01:00.0: num_data_h2c_failure = 0
+    kernel: mwifiex_pcie 0000:01:00.0: num_cmd_h2c_failure = 0
+    kernel: mwifiex_pcie 0000:01:00.0: is_cmd_timedout = 1
+    kernel: mwifiex_pcie 0000:01:00.0: num_tx_timeout = 0
+    kernel: mwifiex_pcie 0000:01:00.0: last_cmd_index = 2
+    kernel: mwifiex_pcie 0000:01:00.0: last_cmd_id: 16 00 a4 00 fa 00 a4 00 7f 00
+    kernel: mwifiex_pcie 0000:01:00.0: last_cmd_act: 00 00 00 00 00 a0 00 00 00 00
+    kernel: mwifiex_pcie 0000:01:00.0: last_cmd_resp_index = 0
+    kernel: mwifiex_pcie 0000:01:00.0: last_cmd_resp_id: 16 80 7f 80 16 80 a4 80 7f 80
+    kernel: mwifiex_pcie 0000:01:00.0: last_event_index = 1
+    kernel: mwifiex_pcie 0000:01:00.0: last_event: 58 00 58 00 58 00 58 00 58 00
+    kernel: mwifiex_pcie 0000:01:00.0: data_sent=0 cmd_sent=1
+    kernel: mwifiex_pcie 0000:01:00.0: ps_mode=0 ps_state=0
+    kernel: mwifiex_pcie 0000:01:00.0: info: _mwifiex_fw_dpc: unregister device
+    # mwifiex_pcie_work hanged
+    kernel: INFO: task kworker/0:0:24857 blocked for more than 122 seconds.
+    kernel:       Tainted: G        W  OE     5.3.11-arch1-1 #1
+    kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+    kernel: kworker/0:0     D    0 24857      2 0x80004000
+    kernel: Workqueue: events mwifiex_pcie_work [mwifiex_pcie]
+    kernel: Call Trace:
+    kernel:  ? __schedule+0x27f/0x6d0
+    kernel:  schedule+0x43/0xd0
+    kernel:  schedule_timeout+0x299/0x3d0
+    kernel:  ? __switch_to_asm+0x40/0x70
+    kernel:  wait_for_common+0xeb/0x190
+    kernel:  ? wake_up_q+0x60/0x60
+    kernel:  __flush_work+0x130/0x1e0
+    kernel:  ? flush_workqueue_prep_pwqs+0x130/0x130
+    kernel:  __cancel_work_timer+0x123/0x1b0
+    kernel:  mwifiex_cleanup_pcie+0x28/0xd0 [mwifiex_pcie]
+    kernel:  mwifiex_free_adapter+0x24/0xe0 [mwifiex]
+    kernel:  _mwifiex_fw_dpc+0x28d/0x520 [mwifiex]
+    kernel:  mwifiex_reinit_sw+0x15d/0x2c0 [mwifiex]
+    kernel:  mwifiex_pcie_reset_done+0x50/0x80 [mwifiex_pcie]
+    kernel:  pci_try_reset_function+0x38/0x70
+    kernel:  process_one_work+0x1d1/0x3a0
+    kernel:  worker_thread+0x4a/0x3d0
+    kernel:  kthread+0xfb/0x130
+    kernel:  ? process_one_work+0x3a0/0x3a0
+    kernel:  ? kthread_park+0x80/0x80
+    kernel:  ret_from_fork+0x35/0x40
+
+This is a deadlock caused by calling cancel_work_sync() in
+mwifiex_cleanup_pcie():
+
+- Device resets are done via mwifiex_pcie_card_reset()
+- which schedules card->work to call mwifiex_pcie_card_reset_work()
+- which calls pci_try_reset_function().
+- This leads to mwifiex_pcie_reset_done() be called on the same workqueue,
+  which in turn calls
+- mwifiex_reinit_sw() and that calls
+- _mwifiex_fw_dpc().
+
+The problem is now that _mwifiex_fw_dpc() calls mwifiex_free_adapter()
+in case firmware initialization fails. That ends up calling
+mwifiex_cleanup_pcie().
+
+Note that all those calls are still running on the workqueue. So when
+mwifiex_cleanup_pcie() now calls cancel_work_sync(), it's really waiting
+on itself to complete, causing a deadlock.
+
+This commit fixes the deadlock by skipping cancel_work_sync() on a reset
+failure path.
+
+After this commit, when reset fails, the following output is
+expected to be shown:
+
+    kernel: mwifiex_pcie 0000:03:00.0: info: _mwifiex_fw_dpc: unregister device
+    kernel: mwifiex: Failed to bring up adapter: -5
+    kernel: mwifiex_pcie 0000:03:00.0: reinit failed: -5
+
+To reproduce this issue, for example, try putting the root port of wifi
+into D3 (replace "00:1d.3" with your setup).
+
+    # put into D3 (root port)
+    sudo setpci -v -s 00:1d.3 CAP_PM+4.b=0b
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c | 18 +++++++++++++++++-
+ drivers/net/wireless/marvell/mwifiex/pcie.h |  2 ++
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 87b4ccca4b9a..00138d6129f4 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -377,6 +377,8 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
+ 	clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ 	clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
+ 	mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
++
++	card->pci_reset_ongoing = true;
+ }
+ 
+ /*
+@@ -405,6 +407,8 @@ static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
+ 		dev_err(&pdev->dev, "reinit failed: %d\n", ret);
+ 	else
+ 		mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
++
++	card->pci_reset_ongoing = false;
+ }
+ 
+ static const struct pci_error_handlers mwifiex_pcie_err_handler = {
+@@ -2995,7 +2999,19 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
+ 	int ret;
+ 	u32 fw_status;
+ 
+-	cancel_work_sync(&card->work);
++	/* Perform the cancel_work_sync() only when we're not resetting
++	 * the card. It's because that function never returns if we're
++	 * in reset path. If we're here when resetting the card, it means
++	 * that we failed to reset the card (reset failure path).
++	 */
++	if (!card->pci_reset_ongoing) {
++		mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n");
++		cancel_work_sync(&card->work);
++		mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n");
++	} else {
++		mwifiex_dbg(adapter, MSG,
++			    "skipped cancel_work_sync() because we're in card reset failure path\n");
++	}
+ 
+ 	ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
+ 	if (fw_status == FIRMWARE_READY_PCIE) {
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
+index fc59b522f670..048f4db6027a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
+@@ -391,6 +391,8 @@ struct pcie_service_card {
+ 	struct mwifiex_msix_context share_irq_ctx;
+ 	struct work_struct work;
+ 	unsigned long work_flags;
++
++	bool pci_reset_ongoing;
+ };
+ 
+ static inline int
+-- 
+2.28.0
+
+From 36c7b429bd54df9e55fbdc653139d4baf16c9953 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Mon, 28 Sep 2020 17:46:49 +0900
+Subject: [PATCH] mwifiex: pcie: add DMI-based quirk impl for Surface devices
+
+This commit adds quirk implementation based on DMI matching with DMI
+table for Surface devices.
+
+This implementation can be used for quirks later.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/Makefile |   1 +
+ drivers/net/wireless/marvell/mwifiex/pcie.c   |   4 +
+ drivers/net/wireless/marvell/mwifiex/pcie.h   |   1 +
+ .../wireless/marvell/mwifiex/pcie_quirks.c    | 114 ++++++++++++++++++
+ .../wireless/marvell/mwifiex/pcie_quirks.h    |  11 ++
+ 5 files changed, 131 insertions(+)
+ create mode 100644 drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+ create mode 100644 drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/Makefile b/drivers/net/wireless/marvell/mwifiex/Makefile
+index fdfd9bf15ed4..8a1e7c5b9c6e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/Makefile
++++ b/drivers/net/wireless/marvell/mwifiex/Makefile
+@@ -49,6 +49,7 @@ mwifiex_sdio-y += sdio.o
+ obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o
+ 
+ mwifiex_pcie-y += pcie.o
++mwifiex_pcie-y += pcie_quirks.o
+ obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o
+ 
+ mwifiex_usb-y += usb.o
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 00138d6129f4..899ce2657880 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -27,6 +27,7 @@
+ #include "wmm.h"
+ #include "11n.h"
+ #include "pcie.h"
++#include "pcie_quirks.h"
+ 
+ #define PCIE_VERSION	"1.0"
+ #define DRV_NAME        "Marvell mwifiex PCIe"
+@@ -261,6 +262,9 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
+ 			return ret;
+ 	}
+ 
++	/* check quirks */
++	mwifiex_initialize_quirks(card);
++
+ 	if (mwifiex_add_card(card, &card->fw_done, &pcie_ops,
+ 			     MWIFIEX_PCIE, &pdev->dev)) {
+ 		pr_err("%s failed\n", __func__);
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
+index 048f4db6027a..51566380f8da 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
+@@ -393,6 +393,7 @@ struct pcie_service_card {
+ 	unsigned long work_flags;
+ 
+ 	bool pci_reset_ongoing;
++	unsigned long quirks;
+ };
+ 
+ static inline int
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+new file mode 100644
+index 000000000000..929aee2b0a60
+--- /dev/null
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -0,0 +1,114 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * File for PCIe quirks.
++ */
++
++/* The low-level PCI operations will be performed in this file. Therefore,
++ * let's use dev_*() instead of mwifiex_dbg() here to avoid troubles (e.g.
++ * to avoid using mwifiex_adapter struct before init or wifi is powered
++ * down, or causes NULL ptr deref).
++ */
++
++#include <linux/dmi.h>
++
++#include "pcie_quirks.h"
++
++/* quirk table based on DMI matching */
++static const struct dmi_system_id mwifiex_quirk_table[] = {
++	{
++		.ident = "Surface Pro 4",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Pro 5",
++		.matches = {
++			/* match for SKU here due to generic product name "Surface Pro" */
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Pro 5 (LTE)",
++		.matches = {
++			/* match for SKU here due to generic product name "Surface Pro" */
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Pro 6",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Book 1",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Book 2",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Laptop 1",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Laptop 2",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface 3",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
++		},
++		.driver_data = 0,
++	},
++	{
++		.ident = "Surface Pro 3",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 3"),
++		},
++		.driver_data = 0,
++	},
++	{}
++};
++
++void mwifiex_initialize_quirks(struct pcie_service_card *card)
++{
++	struct pci_dev *pdev = card->dev;
++	const struct dmi_system_id *dmi_id;
++
++	dmi_id = dmi_first_match(mwifiex_quirk_table);
++	if (dmi_id)
++		card->quirks = (uintptr_t)dmi_id->driver_data;
++
++	if (!card->quirks)
++		dev_info(&pdev->dev, "no quirks enabled\n");
++}
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+new file mode 100644
+index 000000000000..5326ae7e5671
+--- /dev/null
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -0,0 +1,11 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Header file for PCIe quirks.
++ */
++
++#include "pcie.h"
++
++/* quirks */
++// quirk flags can be added here
++
++void mwifiex_initialize_quirks(struct pcie_service_card *card);
+-- 
+2.28.0
+
+From e4cedba33eff73175314de2c93bd8c2ddf01e441 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Tue, 29 Sep 2020 17:25:22 +0900
+Subject: [PATCH] mwifiex: pcie: add reset_d3cold quirk for Surface gen4+
+ devices
+
+To reset mwifiex on Surface gen4+ (Pro 4 or later gen) devices, it
+seems that putting the wifi device into D3cold is required according
+to errata.inf file on Windows installation (Windows/INF/errata.inf).
+
+This patch adds a function that performs power-cycle (put into D3cold
+then D0) and call the function at the end of reset_prepare().
+
+Note: Need to also reset the parent device (bridge) of wifi on SB1;
+it might be because the bridge of wifi always reports it's in D3hot.
+When I tried to reset only the wifi device (not touching parent), it gave
+the following error and the reset failed:
+
+    acpi device:4b: Cannot transition to power state D0 for parent in D3hot
+    mwifiex_pcie 0000:03:00.0: can't change power state from D3cold to D0 (config space inaccessible)
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c   |  7 ++
+ .../wireless/marvell/mwifiex/pcie_quirks.c    | 73 +++++++++++++++++--
+ .../wireless/marvell/mwifiex/pcie_quirks.h    |  3 +-
+ 3 files changed, 74 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 899ce2657880..45488c2bc1c1 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -380,6 +380,13 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
+ 	mwifiex_shutdown_sw(adapter);
+ 	clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ 	clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
++
++	/* For Surface gen4+ devices, we need to put wifi into D3cold right
++	 * before performing FLR
++	 */
++	if (card->quirks & QUIRK_FW_RST_D3COLD)
++		mwifiex_pcie_reset_d3cold_quirk(pdev);
++
+ 	mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+ 
+ 	card->pci_reset_ongoing = true;
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index 929aee2b0a60..edc739c542fe 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -21,7 +21,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5",
+@@ -30,7 +30,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5 (LTE)",
+@@ -39,7 +39,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Pro 6",
+@@ -47,7 +47,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Book 1",
+@@ -55,7 +55,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Book 2",
+@@ -63,7 +63,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 1",
+@@ -71,7 +71,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 2",
+@@ -79,7 +79,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
+ 	},
+ 	{
+ 		.ident = "Surface 3",
+@@ -111,4 +111,61 @@ void mwifiex_initialize_quirks(struct pcie_service_card *card)
+ 
+ 	if (!card->quirks)
+ 		dev_info(&pdev->dev, "no quirks enabled\n");
++	if (card->quirks & QUIRK_FW_RST_D3COLD)
++		dev_info(&pdev->dev, "quirk reset_d3cold enabled\n");
++}
++
++static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
++{
++	dev_info(&pdev->dev, "putting into D3cold...\n");
++
++	pci_save_state(pdev);
++	if (pci_is_enabled(pdev))
++		pci_disable_device(pdev);
++	pci_set_power_state(pdev, PCI_D3cold);
++}
++
++static int mwifiex_pcie_set_power_d0(struct pci_dev *pdev)
++{
++	int ret;
++
++	dev_info(&pdev->dev, "putting into D0...\n");
++
++	pci_set_power_state(pdev, PCI_D0);
++	ret = pci_enable_device(pdev);
++	if (ret) {
++		dev_err(&pdev->dev, "pci_enable_device failed\n");
++		return ret;
++	}
++	pci_restore_state(pdev);
++
++	return 0;
++}
++
++int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev)
++{
++	struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
++	int ret;
++
++	/* Power-cycle (put into D3cold then D0) */
++	dev_info(&pdev->dev, "Using reset_d3cold quirk to perform FW reset\n");
++
++	/* We need to perform power-cycle also for bridge of wifi because
++	 * on some devices (e.g. Surface Book 1), the OS for some reasons
++	 * can't know the real power state of the bridge.
++	 * When tried to power-cycle only wifi, the reset failed with the
++	 * following dmesg log:
++	 * "Cannot transition to power state D0 for parent in D3hot".
++	 */
++	mwifiex_pcie_set_power_d3cold(pdev);
++	mwifiex_pcie_set_power_d3cold(parent_pdev);
++
++	ret = mwifiex_pcie_set_power_d0(parent_pdev);
++	if (ret)
++		return ret;
++	ret = mwifiex_pcie_set_power_d0(pdev);
++	if (ret)
++		return ret;
++
++	return 0;
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+index 5326ae7e5671..8b9dcb5070d8 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -6,6 +6,7 @@
+ #include "pcie.h"
+ 
+ /* quirks */
+-// quirk flags can be added here
++#define QUIRK_FW_RST_D3COLD	BIT(0)
+ 
+ void mwifiex_initialize_quirks(struct pcie_service_card *card);
++int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
+-- 
+2.28.0
+
+From acaf58ea2c66fb0e7141952b54ec2f0eb7d6934e Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Tue, 29 Sep 2020 17:32:22 +0900
+Subject: [PATCH] mwifiex: pcie: add reset_wsid quirk for Surface 3
+
+This commit adds reset_wsid quirk and uses this quirk for Surface 3 on
+card reset.
+
+To reset mwifiex on Surface 3, it seems that calling the _DSM method
+that exists in the \_SB.WSID [1] device is required.
+
+On Surface 3, calling the _DSM method removes/re-probes the card by
+itself. So, need to place the reset function before performing FLR and
+skip performing any other reset-related works.
+
+Note that Surface Pro 3 also has the WSID device [2], but it seems to need
+more work. This commit only supports Surface 3 for now.
+
+[1] https://github.com/linux-surface/acpidumps/blob/05cba925f3a515f222acb5b3551a032ddde958fe/surface_3/dsdt.dsl#L11947-L12011
+[2] https://github.com/linux-surface/acpidumps/blob/05cba925f3a515f222acb5b3551a032ddde958fe/surface_pro_3/dsdt.dsl#L12164-L12216
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c   | 10 +++
+ .../wireless/marvell/mwifiex/pcie_quirks.c    | 77 ++++++++++++++++++-
+ .../wireless/marvell/mwifiex/pcie_quirks.h    |  5 ++
+ 3 files changed, 91 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 45488c2bc1c1..daae572ce94e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -2817,6 +2817,16 @@ static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
+ {
+ 	struct pcie_service_card *card = adapter->card;
+ 
++	/* On Surface 3, reset_wsid method removes then re-probes card by
++	 * itself. So, need to place it here and skip performing any other
++	 * reset-related works.
++	 */
++	if (card->quirks & QUIRK_FW_RST_WSID_S3) {
++		mwifiex_pcie_reset_wsid_quirk(card->dev);
++		/* skip performing any other reset-related works */
++		return;
++	}
++
+ 	/* We can't afford to wait here; remove() might be waiting on us. If we
+ 	 * can't grab the device lock, maybe we'll get another chance later.
+ 	 */
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index edc739c542fe..f0a6fa0a7ae5 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -9,10 +9,21 @@
+  * down, or causes NULL ptr deref).
+  */
+ 
++#include <linux/acpi.h>
+ #include <linux/dmi.h>
+ 
+ #include "pcie_quirks.h"
+ 
++/* For reset_wsid quirk */
++#define ACPI_WSID_PATH		"\\_SB.WSID"
++#define WSID_REV		0x0
++#define WSID_FUNC_WIFI_PWR_OFF	0x1
++#define WSID_FUNC_WIFI_PWR_ON	0x2
++/* WSID _DSM UUID: "534ea3bf-fcc2-4e7a-908f-a13978f0c7ef" */
++static const guid_t wsid_dsm_guid =
++	GUID_INIT(0x534ea3bf, 0xfcc2, 0x4e7a,
++		  0x90, 0x8f, 0xa1, 0x39, 0x78, 0xf0, 0xc7, 0xef);
++
+ /* quirk table based on DMI matching */
+ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 	{
+@@ -87,7 +98,7 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+-		.driver_data = 0,
++		.driver_data = (void *)QUIRK_FW_RST_WSID_S3,
+ 	},
+ 	{
+ 		.ident = "Surface Pro 3",
+@@ -113,6 +124,9 @@ void mwifiex_initialize_quirks(struct pcie_service_card *card)
+ 		dev_info(&pdev->dev, "no quirks enabled\n");
+ 	if (card->quirks & QUIRK_FW_RST_D3COLD)
+ 		dev_info(&pdev->dev, "quirk reset_d3cold enabled\n");
++	if (card->quirks & QUIRK_FW_RST_WSID_S3)
++		dev_info(&pdev->dev,
++			 "quirk reset_wsid for Surface 3 enabled\n");
+ }
+ 
+ static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
+@@ -169,3 +183,64 @@ int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev)
+ 
+ 	return 0;
+ }
++
++int mwifiex_pcie_reset_wsid_quirk(struct pci_dev *pdev)
++{
++	acpi_handle handle;
++	union acpi_object *obj;
++	acpi_status status;
++
++	dev_info(&pdev->dev, "Using reset_wsid quirk to perform FW reset\n");
++
++	status = acpi_get_handle(NULL, ACPI_WSID_PATH, &handle);
++	if (ACPI_FAILURE(status)) {
++		dev_err(&pdev->dev, "No ACPI handle for path %s\n",
++			ACPI_WSID_PATH);
++		return -ENODEV;
++	}
++
++	if (!acpi_has_method(handle, "_DSM")) {
++		dev_err(&pdev->dev, "_DSM method not found\n");
++		return -ENODEV;
++	}
++
++	if (!acpi_check_dsm(handle, &wsid_dsm_guid,
++			    WSID_REV, WSID_FUNC_WIFI_PWR_OFF)) {
++		dev_err(&pdev->dev,
++			"_DSM method doesn't support wifi power off func\n");
++		return -ENODEV;
++	}
++
++	if (!acpi_check_dsm(handle, &wsid_dsm_guid,
++			    WSID_REV, WSID_FUNC_WIFI_PWR_ON)) {
++		dev_err(&pdev->dev,
++			"_DSM method doesn't support wifi power on func\n");
++		return -ENODEV;
++	}
++
++	/* card will be removed immediately after this call on Surface 3 */
++	dev_info(&pdev->dev, "turning wifi off...\n");
++	obj = acpi_evaluate_dsm(handle, &wsid_dsm_guid,
++				WSID_REV, WSID_FUNC_WIFI_PWR_OFF,
++				NULL);
++	if (!obj) {
++		dev_err(&pdev->dev,
++			"device _DSM execution failed for turning wifi off\n");
++		return -EIO;
++	}
++	ACPI_FREE(obj);
++
++	/* card will be re-probed immediately after this call on Surface 3 */
++	dev_info(&pdev->dev, "turning wifi on...\n");
++	obj = acpi_evaluate_dsm(handle, &wsid_dsm_guid,
++				WSID_REV, WSID_FUNC_WIFI_PWR_ON,
++				NULL);
++	if (!obj) {
++		dev_err(&pdev->dev,
++			"device _DSM execution failed for turning wifi on\n");
++		return -EIO;
++	}
++	ACPI_FREE(obj);
++
++	return 0;
++}
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+index 8b9dcb5070d8..3ef7440418e3 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -7,6 +7,11 @@
+ 
+ /* quirks */
+ #define QUIRK_FW_RST_D3COLD	BIT(0)
++/* Surface 3 and Surface Pro 3 have the same _DSM method but need to
++ * be handled differently. Currently, only S3 is supported.
++ */
++#define QUIRK_FW_RST_WSID_S3	BIT(1)
+ 
+ void mwifiex_initialize_quirks(struct pcie_service_card *card);
+ int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
++int mwifiex_pcie_reset_wsid_quirk(struct pci_dev *pdev);
+-- 
+2.28.0
+
+From 4a498b49a7f7141895741fcdd28032b790ff9d35 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Wed, 30 Sep 2020 18:08:24 +0900
+Subject: [PATCH] mwifiex: pcie: (OEMB) add quirk for Surface 3 with broken DMI
+ table
+
+(made with reference to http://git.osdn.net/view?p=android-x86/kernel.git;a=commitdiff;h=18e2e857c57633b25b3b4120f212224a108cd883)
+
+On some Surface 3, the DMI table gets corrupted for unknown reasons
+and breaks existing DMI matching used for device-specific quirks.
+
+This commit adds the (broken) DMI info for the affected Surface 3.
+
+On affected systems, DMI info will look like this:
+    $ grep . /sys/devices/virtual/dmi/id/{bios_vendor,board_name,board_vendor,\
+    chassis_vendor,product_name,sys_vendor}
+    /sys/devices/virtual/dmi/id/bios_vendor:American Megatrends Inc.
+    /sys/devices/virtual/dmi/id/board_name:OEMB
+    /sys/devices/virtual/dmi/id/board_vendor:OEMB
+    /sys/devices/virtual/dmi/id/chassis_vendor:OEMB
+    /sys/devices/virtual/dmi/id/product_name:OEMB
+    /sys/devices/virtual/dmi/id/sys_vendor:OEMB
+
+Expected:
+    $ grep . /sys/devices/virtual/dmi/id/{bios_vendor,board_name,board_vendor,\
+    chassis_vendor,product_name,sys_vendor}
+    /sys/devices/virtual/dmi/id/bios_vendor:American Megatrends Inc.
+    /sys/devices/virtual/dmi/id/board_name:Surface 3
+    /sys/devices/virtual/dmi/id/board_vendor:Microsoft Corporation
+    /sys/devices/virtual/dmi/id/chassis_vendor:Microsoft Corporation
+    /sys/devices/virtual/dmi/id/product_name:Surface 3
+    /sys/devices/virtual/dmi/id/sys_vendor:Microsoft Corporation
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie_quirks.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index f0a6fa0a7ae5..34dcd84f02a6 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -100,6 +100,15 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)QUIRK_FW_RST_WSID_S3,
+ 	},
++	{
++		.ident = "Surface 3",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++		.driver_data = (void *)QUIRK_FW_RST_WSID_S3,
++	},
+ 	{
+ 		.ident = "Surface Pro 3",
+ 		.matches = {
+-- 
+2.28.0
+
+From 28158ef411e579c66eb33cafe1b7af5cd9dbcc89 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Thu, 24 Sep 2020 01:56:29 +0900
+Subject: [PATCH] mwifiex: fix mwifiex_shutdown_sw() causing sw reset failure
+
+When FLR is performed but without fw reset for some reasons (e.g. on
+Surface devices, fw reset requires another quirk), it fails to reset
+properly. You can trigger the issue on such devices via debugfs entry
+for reset:
+
+    $ echo 1 | sudo tee /sys/kernel/debug/mwifiex/mlan0/reset
+
+and the resulting dmesg log:
+
+    mwifiex_pcie 0000:03:00.0: Resetting per request
+    mwifiex_pcie 0000:03:00.0: info: successfully disconnected from [BSSID]: reason code 3
+    mwifiex_pcie 0000:03:00.0: PREP_CMD: card is removed
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    mwifiex_pcie 0000:03:00.0: PREP_CMD: card is removed
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    mwifiex_pcie 0000:03:00.0: PREP_CMD: card is removed
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    mwifiex_pcie 0000:03:00.0: PREP_CMD: card is removed
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    mwifiex_pcie 0000:03:00.0: PREP_CMD: card is removed
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    mwifiex_pcie 0000:03:00.0: PREP_CMD: card is removed
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    mwifiex_pcie 0000:03:00.0: info: shutdown mwifiex...
+    mwifiex_pcie 0000:03:00.0: PREP_CMD: card is removed
+    mwifiex_pcie 0000:03:00.0: PREP_CMD: card is removed
+    mwifiex_pcie 0000:03:00.0: WLAN FW already running! Skip FW dnld
+    mwifiex_pcie 0000:03:00.0: WLAN FW is active
+    mwifiex_pcie 0000:03:00.0: Unknown api_id: 4
+    mwifiex_pcie 0000:03:00.0: info: MWIFIEX VERSION: mwifiex 1.0 (15.68.19.p21)
+    mwifiex_pcie 0000:03:00.0: driver_version = mwifiex 1.0 (15.68.19.p21)
+    mwifiex_pcie 0000:03:00.0: info: trying to associate to '[SSID]' bssid [BSSID]
+    mwifiex_pcie 0000:03:00.0: info: associated to bssid [BSSID] successfully
+    mwifiex_pcie 0000:03:00.0: cmd_wait_q terminated: -110
+    mwifiex_pcie 0000:03:00.0: info: successfully disconnected from [BSSID]: reason code 15
+    mwifiex_pcie 0000:03:00.0: cmd_wait_q terminated: -110
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    mwifiex_pcie 0000:03:00.0: cmd_wait_q terminated: -110
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    mwifiex_pcie 0000:03:00.0: cmd_wait_q terminated: -110
+    mwifiex_pcie 0000:03:00.0: deleting the crypto keys
+    [...]
+
+When comparing mwifiex_shutdown_sw() with mwifiex_pcie_remove(), it
+lacks mwifiex_init_shutdown_fw().
+
+This commit fixes mwifiex_shutdown_sw() by adding the missing
+mwifiex_init_shutdown_fw().
+
+Fixes: 4c5dae59d2e9 ("mwifiex: add PCIe function level reset support")
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
+index 9ee5600351a7..5965999f1b9b 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.c
++++ b/drivers/net/wireless/marvell/mwifiex/main.c
+@@ -1469,6 +1469,8 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
+ 	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ 	mwifiex_deauthenticate(priv, NULL);
+ 
++	mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
++
+ 	mwifiex_uninit_sw(adapter);
+ 	adapter->is_up = false;
+ 
+-- 
+2.28.0
+
+From f0a8812d81ab425af896717c2ef339d8f7d0557e Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Thu, 24 Sep 2020 01:56:34 +0900
+Subject: [PATCH] mwifiex: pcie: use shutdown_sw()/reinit_sw() on
+ suspend()/resume()
+
+There are issues with S0ix achievement and AP scanning after suspend
+with the current Host Sleep method.
+
+When using the Host Sleep method, it prevents the platform to reach S0ix
+during suspend. Also, after suspend, sometimes AP scanning won't work,
+resulting in non-working wifi.
+
+To fix such issues, perform shutdown_sw()/reinit_sw() instead of Host
+Sleep.
+
+As a side effect, this patch disables wakeups (means that Wake-On-WLAN
+can't be used anymore, if it was working before), and might also reset
+some internal states.
+
+Note that suspend() no longer checks if it's already suspended.
+
+With the previous Host Sleep method, the check was done by looking at
+adapter->hs_activated in mwifiex_enable_hs() [sta_ioctl.c], but not
+MWIFIEX_IS_SUSPENDED. So, what the previous method checked was instead
+Host Sleep state, not suspend itself. Therefore, there is no need to check
+the suspend state now.
+
+Also removed comment for suspend state check at top of suspend()
+accordingly.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/main.c |  4 +--
+ drivers/net/wireless/marvell/mwifiex/pcie.c | 29 +++++++--------------
+ 2 files changed, 12 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
+index 5965999f1b9b..74bf0076daec 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.c
++++ b/drivers/net/wireless/marvell/mwifiex/main.c
+@@ -1453,7 +1453,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
+ }
+ 
+ /*
+- * This function gets called during PCIe function level reset.
++ * This function can be used for shutting down the adapter SW.
+  */
+ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
+ {
+@@ -1481,7 +1481,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
+ }
+ EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw);
+ 
+-/* This function gets called during PCIe function level reset. Required
++/* This function can be used for reinitting the adapter SW. Required
+  * code is extracted from mwifiex_add_card()
+  */
+ int
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index daae572ce94e..b46d56389c3b 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -145,8 +145,7 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
+  * registered functions must have drivers with suspend and resume
+  * methods. Failing that the kernel simply removes the whole card.
+  *
+- * If already not suspended, this function allocates and sends a host
+- * sleep activate request to the firmware and turns off the traffic.
++ * This function shuts down the adapter.
+  */
+ static int mwifiex_pcie_suspend(struct device *dev)
+ {
+@@ -154,31 +153,21 @@ static int mwifiex_pcie_suspend(struct device *dev)
+ 	struct pcie_service_card *card = dev_get_drvdata(dev);
+ 
+ 
+-	/* Might still be loading firmware */
+-	wait_for_completion(&card->fw_done);
+-
+ 	adapter = card->adapter;
+ 	if (!adapter) {
+ 		dev_err(dev, "adapter is not valid\n");
+ 		return 0;
+ 	}
+ 
+-	mwifiex_enable_wake(adapter);
+-
+-	/* Enable the Host Sleep */
+-	if (!mwifiex_enable_hs(adapter)) {
++	/* Shut down SW */
++	if (mwifiex_shutdown_sw(adapter)) {
+ 		mwifiex_dbg(adapter, ERROR,
+ 			    "cmd: failed to suspend\n");
+-		clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
+-		mwifiex_disable_wake(adapter);
+ 		return -EFAULT;
+ 	}
+ 
+-	flush_workqueue(adapter->workqueue);
+-
+ 	/* Indicate device suspended */
+ 	set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+-	clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
+ 
+ 	return 0;
+ }
+@@ -188,13 +177,13 @@ static int mwifiex_pcie_suspend(struct device *dev)
+  * registered functions must have drivers with suspend and resume
+  * methods. Failing that the kernel simply removes the whole card.
+  *
+- * If already not resumed, this function turns on the traffic and
+- * sends a host sleep cancel request to the firmware.
++ * If already not resumed, this function reinits the adapter.
+  */
+ static int mwifiex_pcie_resume(struct device *dev)
+ {
+ 	struct mwifiex_adapter *adapter;
+ 	struct pcie_service_card *card = dev_get_drvdata(dev);
++	int ret;
+ 
+ 
+ 	if (!card->adapter) {
+@@ -212,9 +201,11 @@ static int mwifiex_pcie_resume(struct device *dev)
+ 
+ 	clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ 
+-	mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
+-			  MWIFIEX_ASYNC_CMD);
+-	mwifiex_disable_wake(adapter);
++	ret = mwifiex_reinit_sw(adapter);
++	if (ret)
++		dev_err(dev, "reinit failed: %d\n", ret);
++	else
++		mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+ 
+ 	return 0;
+ }
+-- 
+2.28.0
+
+From 3754079592ed651678caaaf85ba6e974bcc5acf1 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Mon, 24 Aug 2020 17:11:35 +0900
+Subject: [PATCH] mwifiex: pcie: add enable_device_dump module parameter
+
+The device_dump may take a fairly long time and users may want to
+disable the dump for daily usage.
+
+This commit adds a new module parameter and disables device_dump by
+default.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index b46d56389c3b..1847a0274991 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -34,6 +34,11 @@
+ 
+ static struct mwifiex_if_ops pcie_ops;
+ 
++static bool enable_device_dump;
++module_param(enable_device_dump, bool, 0644);
++MODULE_PARM_DESC(enable_device_dump,
++		 "enable device_dump (default: disabled)");
++
+ static const struct of_device_id mwifiex_pcie_of_match_table[] = {
+ 	{ .compatible = "pci11ab,2b42" },
+ 	{ .compatible = "pci1b4b,2b42" },
+@@ -2791,6 +2796,12 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
+ 
+ static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
+ {
++	if (!enable_device_dump) {
++		mwifiex_dbg(adapter, MSG,
++			    "device_dump is disabled by module parameter\n");
++		return;
++	}
++
+ 	adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+ 	if (!adapter->devdump_data) {
+ 		mwifiex_dbg(adapter, ERROR,
+-- 
+2.28.0
+
+From 56e9e15e3c774e324afed5f8e2a9465c36cb0378 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 4 Oct 2020 00:11:49 +0900
+Subject: [PATCH] mwifiex: pcie: disable bridge_d3 for Surface gen4+
+
+Currently, mwifiex fw will crash after suspend on recent kernel series.
+On Windows, it seems that the root port of wifi will never enter D3 state
+(stay on D0 state). And on Linux, disabling the D3 state for the
+bridge fixes fw crashing after suspend.
+
+This commit disables the D3 state of root port on driver initialization
+and fixes fw crashing after suspend.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c   |  7 +++++
+ .../wireless/marvell/mwifiex/pcie_quirks.c    | 27 +++++++++++++------
+ .../wireless/marvell/mwifiex/pcie_quirks.h    |  1 +
+ 3 files changed, 27 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 1847a0274991..3bd39d9ba3de 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -226,6 +226,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
+ 					const struct pci_device_id *ent)
+ {
+ 	struct pcie_service_card *card;
++	struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
+ 	int ret;
+ 
+ 	pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
+@@ -267,6 +268,12 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
+ 		return -1;
+ 	}
+ 
++	/* disable bridge_d3 for Surface gen4+ devices to fix fw crashing
++	 * after suspend
++	 */
++	if (card->quirks & QUIRK_NO_BRIDGE_D3)
++		parent_pdev->bridge_d3 = false;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index 34dcd84f02a6..a2aeb2af907e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -32,7 +32,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5",
+@@ -41,7 +42,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5 (LTE)",
+@@ -50,7 +52,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 6",
+@@ -58,7 +61,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Book 1",
+@@ -66,7 +70,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Book 2",
+@@ -74,7 +79,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 1",
+@@ -82,7 +88,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 2",
+@@ -90,7 +97,8 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface 3",
+@@ -136,6 +144,9 @@ void mwifiex_initialize_quirks(struct pcie_service_card *card)
+ 	if (card->quirks & QUIRK_FW_RST_WSID_S3)
+ 		dev_info(&pdev->dev,
+ 			 "quirk reset_wsid for Surface 3 enabled\n");
++	if (card->quirks & QUIRK_NO_BRIDGE_D3)
++		dev_info(&pdev->dev,
++			 "quirk no_brigde_d3 enabled\n");
+ }
+ 
+ static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+index 3ef7440418e3..a95ebac06e13 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -11,6 +11,7 @@
+  * be handled differently. Currently, only S3 is supported.
+  */
+ #define QUIRK_FW_RST_WSID_S3	BIT(1)
++#define QUIRK_NO_BRIDGE_D3	BIT(2)
+ 
+ void mwifiex_initialize_quirks(struct pcie_service_card *card);
+ int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
+-- 
+2.28.0
+
+From 532503b8b7b9d60a57fc5d15cae07ddbf6c95627 Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 4 Oct 2020 00:25:48 +0900
+Subject: [PATCH] mwifiex: add allow_ps_mode module parameter
+
+This commit adds the allow_ps_mode module parameter and set it false
+(disallowed) by default, to make ps_mode (power_save) control easier.
+
+On some setups (e.g., with 5GHz AP), power_save causes connection
+completely unstable. So, we need to disable it. However, userspace tools
+may try to enable it. For this reason, we need to tell userspace that
+power_save is disallowed by default.
+
+When this parameter is set to false, changing the power_save mode will
+be disallowed like the following:
+
+    $ sudo iw dev mlan0 set power_save on
+    command failed: Operation not permitted (-1)
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/cfg80211.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index 96848fa0e417..786f7a197613 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -25,6 +25,11 @@
+ static char *reg_alpha2;
+ module_param(reg_alpha2, charp, 0);
+ 
++static bool allow_ps_mode;
++module_param(allow_ps_mode, bool, 0644);
++MODULE_PARM_DESC(allow_ps_mode,
++		 "allow WiFi power management to be enabled. (default: disallowed)");
++
+ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
+ 	{
+ 		.max = MWIFIEX_MAX_BSS_NUM,
+@@ -435,6 +440,17 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ 
+ 	ps_mode = enabled;
+ 
++	/* Allow ps_mode to be enabled only when allow_ps_mode is true */
++	if (ps_mode && !allow_ps_mode) {
++		mwifiex_dbg(priv->adapter, MSG,
++			    "Enabling ps_mode disallowed by modparam\n");
++
++		/* Return -EPERM to inform userspace tools that setting
++		 * power_save to be enabled is not permitted.
++		 */
++		return -EPERM;
++	}
++
+ 	return mwifiex_drv_set_power(priv, &ps_mode);
+ }
+ 
+-- 
+2.28.0
+
+From 0972cfcfd39efd4e4ef96db7843adb1742b1da7d Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 4 Oct 2020 00:38:48 +0900
+Subject: [PATCH] mwifiex: print message when changing ps_mode
+
+Users may want to know the ps_mode state change (e.g., diagnosing
+connection issues). This commit adds the print when changing ps_mode.
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/cfg80211.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index 786f7a197613..8f4b8bc5ff03 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -451,6 +451,13 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ 		return -EPERM;
+ 	}
+ 
++	if (ps_mode)
++		mwifiex_dbg(priv->adapter, MSG,
++			    "Enabling ps_mode, disable if unstable.\n");
++	else
++		mwifiex_dbg(priv->adapter, MSG,
++			    "Disabling ps_mode.\n");
++
+ 	return mwifiex_drv_set_power(priv, &ps_mode);
+ }
+ 
+-- 
+2.28.0
+
+From e22985a52f59503c3e7e85e71dae28f19d333beb Mon Sep 17 00:00:00 2001
+From: Tsuchiya Yuto <kitakar@gmail.com>
+Date: Sun, 4 Oct 2020 00:59:37 +0900
+Subject: [PATCH] mwifiex: disable ps_mode explicitly by default instead
+
+At least on Surface devices, the ps_mode causes connection unstable,
+especially with 5GHz APs. Then, it eventually causes fw crashing.
+
+This commit disables ps_mode by default instead of enabling it.
+
+Required code is extracted from mwifiex_drv_set_power().
+
+Signed-off-by: Tsuchiya Yuto <kitakar@gmail.com>
+Patchset: wifi
+---
+ drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+index d3a968ef21ef..9b7b52fbc9c4 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+@@ -2333,14 +2333,19 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
+ 			return -1;
+ 
+ 		if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+-			/* Enable IEEE PS by default */
+-			priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
++			/* Disable IEEE PS by default */
++			priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
+ 			ret = mwifiex_send_cmd(priv,
+ 					       HostCmd_CMD_802_11_PS_MODE_ENH,
+-					       EN_AUTO_PS, BITMAP_STA_PS, NULL,
++					       DIS_AUTO_PS, BITMAP_STA_PS, NULL,
+ 					       true);
+ 			if (ret)
+ 				return -1;
++			ret = mwifiex_send_cmd(priv,
++					       HostCmd_CMD_802_11_PS_MODE_ENH,
++					       GET_PS, 0, NULL, false);
++			if (ret)
++				return -1;
+ 		}
+ 
+ 		if (drcs) {
+-- 
+2.28.0
+

+ 1416 - 0
patches/5.9/0003-ipts.patch

@@ -0,0 +1,1416 @@
+From 4d69a6adcc5ddac18b1db15c3ac08448f5e9bd39 Mon Sep 17 00:00:00 2001
+From: Dorian Stoll <dorian.stoll@tmsp.io>
+Date: Fri, 25 Sep 2020 18:06:05 +0200
+Subject: [PATCH] mei: Remove client devices before shutting down
+
+Patchset: ipts
+---
+ drivers/misc/mei/init.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
+index bcee77768b91..21ed765003e1 100644
+--- a/drivers/misc/mei/init.c
++++ b/drivers/misc/mei/init.c
+@@ -302,10 +302,10 @@ void mei_stop(struct mei_device *dev)
+ {
+ 	dev_dbg(dev->dev, "stopping the device.\n");
+ 
++	mei_cl_bus_remove_devices(dev);
+ 	mutex_lock(&dev->device_lock);
+ 	mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
+ 	mutex_unlock(&dev->device_lock);
+-	mei_cl_bus_remove_devices(dev);
+ 
+ 	mei_cancel_work(dev);
+ 
+-- 
+2.28.0
+
+From 2a15f76fdbeeb3f3b1ac1a7ae4a9591d017c5f53 Mon Sep 17 00:00:00 2001
+From: Dorian Stoll <dorian.stoll@tmsp.io>
+Date: Thu, 30 Jul 2020 13:21:53 +0200
+Subject: [PATCH] misc: mei: Add missing IPTS device IDs
+
+Patchset: ipts
+---
+ drivers/misc/mei/hw-me-regs.h | 1 +
+ drivers/misc/mei/pci-me.c     | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 9cf8d8f60cfe..ca2d4faff6a2 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -92,6 +92,7 @@
+ #define MEI_DEV_ID_CDF        0x18D3  /* Cedar Fork */
+ 
+ #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
++#define MEI_DEV_ID_ICP_LP_3   0x34E4  /* Ice Lake Point LP 3 (iTouch) */
+ 
+ #define MEI_DEV_ID_JSP_N      0x4DE0  /* Jasper Lake Point N */
+ 
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 1de9ef7a272b..e12484840f88 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP_3, MEI_ME_PCH12_CFG)},
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
+-- 
+2.28.0
+
+From 0dfdb2c47e57d7e1511e4b6085424ba8f7e7565e Mon Sep 17 00:00:00 2001
+From: Dorian Stoll <dorian.stoll@tmsp.io>
+Date: Thu, 6 Aug 2020 11:20:41 +0200
+Subject: [PATCH] misc: Add support for Intel Precise Touch & Stylus
+
+Based on linux-surface/intel-precise-touch@3f362c
+
+Signed-off-by: Dorian Stoll <dorian.stoll@tmsp.io>
+Patchset: ipts
+---
+ drivers/misc/Kconfig          |   1 +
+ drivers/misc/Makefile         |   1 +
+ drivers/misc/ipts/Kconfig     |  17 ++
+ drivers/misc/ipts/Makefile    |  12 ++
+ drivers/misc/ipts/context.h   |  48 +++++
+ drivers/misc/ipts/control.c   |  73 ++++++++
+ drivers/misc/ipts/control.h   |  23 +++
+ drivers/misc/ipts/mei.c       | 128 ++++++++++++++
+ drivers/misc/ipts/protocol.h  | 319 ++++++++++++++++++++++++++++++++++
+ drivers/misc/ipts/receiver.c  | 183 +++++++++++++++++++
+ drivers/misc/ipts/receiver.h  |  17 ++
+ drivers/misc/ipts/resources.c | 134 ++++++++++++++
+ drivers/misc/ipts/resources.h |  18 ++
+ drivers/misc/ipts/uapi.c      | 190 ++++++++++++++++++++
+ drivers/misc/ipts/uapi.h      |  47 +++++
+ 15 files changed, 1211 insertions(+)
+ create mode 100644 drivers/misc/ipts/Kconfig
+ create mode 100644 drivers/misc/ipts/Makefile
+ create mode 100644 drivers/misc/ipts/context.h
+ create mode 100644 drivers/misc/ipts/control.c
+ create mode 100644 drivers/misc/ipts/control.h
+ create mode 100644 drivers/misc/ipts/mei.c
+ create mode 100644 drivers/misc/ipts/protocol.h
+ create mode 100644 drivers/misc/ipts/receiver.c
+ create mode 100644 drivers/misc/ipts/receiver.h
+ create mode 100644 drivers/misc/ipts/resources.c
+ create mode 100644 drivers/misc/ipts/resources.h
+ create mode 100644 drivers/misc/ipts/uapi.c
+ create mode 100644 drivers/misc/ipts/uapi.h
+
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index ce136d685d14..102969c546d7 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -472,4 +472,5 @@ source "drivers/misc/ocxl/Kconfig"
+ source "drivers/misc/cardreader/Kconfig"
+ source "drivers/misc/habanalabs/Kconfig"
+ source "drivers/misc/uacce/Kconfig"
++source "drivers/misc/ipts/Kconfig"
+ endmenu
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index c7bd01ac6291..f97938d777e1 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -57,3 +57,4 @@ obj-$(CONFIG_PVPANIC)   	+= pvpanic.o
+ obj-$(CONFIG_HABANA_AI)		+= habanalabs/
+ obj-$(CONFIG_UACCE)		+= uacce/
+ obj-$(CONFIG_XILINX_SDFEC)	+= xilinx_sdfec.o
++obj-$(CONFIG_MISC_IPTS)		+= ipts/
+diff --git a/drivers/misc/ipts/Kconfig b/drivers/misc/ipts/Kconfig
+new file mode 100644
+index 000000000000..83e2a930c396
+--- /dev/null
++++ b/drivers/misc/ipts/Kconfig
+@@ -0,0 +1,17 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++
++config MISC_IPTS
++	tristate "Intel Precise Touch & Stylus"
++	depends on INTEL_MEI
++	help
++	  Say Y here if your system has a touchscreen using Intels
++	  Precise Touch & Stylus (IPTS) technology.
++
++	  If unsure say N.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called ipts.
++
++	  Building this driver alone will not give you a working touchscreen.
++	  It only exposed a userspace API that can be used by a daemon to
++	  receive and process data from the touchscreen hardware.
+diff --git a/drivers/misc/ipts/Makefile b/drivers/misc/ipts/Makefile
+new file mode 100644
+index 000000000000..8f58b9adbc94
+--- /dev/null
++++ b/drivers/misc/ipts/Makefile
+@@ -0,0 +1,12 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++#
++# Makefile for the IPTS touchscreen driver
++#
++
++obj-$(CONFIG_MISC_IPTS) += ipts.o
++ipts-objs := control.o
++ipts-objs += mei.o
++ipts-objs += receiver.o
++ipts-objs += resources.o
++ipts-objs += uapi.o
++
+diff --git a/drivers/misc/ipts/context.h b/drivers/misc/ipts/context.h
+new file mode 100644
+index 000000000000..6e8eba3a47e5
+--- /dev/null
++++ b/drivers/misc/ipts/context.h
+@@ -0,0 +1,48 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_CONTEXT_H_
++#define _IPTS_CONTEXT_H_
++
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/mei_cl_bus.h>
++#include <linux/types.h>
++
++#include "protocol.h"
++
++enum ipts_host_status {
++	IPTS_HOST_STATUS_STARTING,
++	IPTS_HOST_STATUS_STARTED,
++	IPTS_HOST_STATUS_STOPPING,
++	IPTS_HOST_STATUS_STOPPED,
++};
++
++struct ipts_buffer_info {
++	u8 *address;
++	dma_addr_t dma_address;
++};
++
++struct ipts_context {
++	struct mei_cl_device *cldev;
++	struct device *dev;
++
++	bool restart;
++	enum ipts_host_status status;
++	struct ipts_get_device_info_rsp device_info;
++
++	struct ipts_buffer_info data[IPTS_BUFFERS];
++	struct ipts_buffer_info doorbell;
++
++	struct ipts_buffer_info feedback[IPTS_BUFFERS];
++	struct ipts_buffer_info workqueue;
++	struct ipts_buffer_info host2me;
++};
++
++#endif /* _IPTS_CONTEXT_H_ */
++
+diff --git a/drivers/misc/ipts/control.c b/drivers/misc/ipts/control.c
+new file mode 100644
+index 000000000000..98787d7ea292
+--- /dev/null
++++ b/drivers/misc/ipts/control.c
+@@ -0,0 +1,73 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/mei_cl_bus.h>
++
++#include "context.h"
++#include "protocol.h"
++#include "resources.h"
++#include "uapi.h"
++
++int ipts_control_send(struct ipts_context *ipts,
++		u32 code, void *payload, size_t size)
++{
++	int ret;
++	struct ipts_command cmd;
++
++	memset(&cmd, 0, sizeof(struct ipts_command));
++	cmd.code = code;
++
++	if (payload && size > 0)
++		memcpy(&cmd.payload, payload, size);
++
++	ret = mei_cldev_send(ipts->cldev, (u8 *)&cmd, sizeof(cmd.code) + size);
++	if (ret >= 0 || ret == -EINTR)
++		return 0;
++
++	dev_err(ipts->dev, "Error while sending: 0x%X:%d\n", code, ret);
++	return ret;
++}
++
++int ipts_control_start(struct ipts_context *ipts)
++{
++	if (ipts->status != IPTS_HOST_STATUS_STOPPED)
++		return -EBUSY;
++
++	dev_info(ipts->dev, "Starting IPTS\n");
++	ipts->status = IPTS_HOST_STATUS_STARTING;
++	ipts->restart = false;
++
++	ipts_uapi_link(ipts);
++	return ipts_control_send(ipts, IPTS_CMD_GET_DEVICE_INFO, NULL, 0);
++}
++
++int ipts_control_stop(struct ipts_context *ipts)
++{
++	if (ipts->status == IPTS_HOST_STATUS_STOPPING)
++		return -EBUSY;
++
++	if (ipts->status == IPTS_HOST_STATUS_STOPPED)
++		return -EBUSY;
++
++	dev_info(ipts->dev, "Stopping IPTS\n");
++	ipts->status = IPTS_HOST_STATUS_STOPPING;
++
++	ipts_uapi_unlink();
++	ipts_resources_free(ipts);
++	return ipts_control_send(ipts, IPTS_CMD_CLEAR_MEM_WINDOW, NULL, 0);
++}
++
++int ipts_control_restart(struct ipts_context *ipts)
++{
++	if (ipts->restart)
++		return -EBUSY;
++
++	ipts->restart = true;
++	return ipts_control_stop(ipts);
++}
++
+diff --git a/drivers/misc/ipts/control.h b/drivers/misc/ipts/control.h
+new file mode 100644
+index 000000000000..2b3172c16063
+--- /dev/null
++++ b/drivers/misc/ipts/control.h
+@@ -0,0 +1,23 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_CONTROL_H_
++#define _IPTS_CONTROL_H_
++
++#include <linux/types.h>
++
++#include "context.h"
++
++int ipts_control_send(struct ipts_context *ipts,
++		u32 cmd, void *payload, size_t size);
++int ipts_control_start(struct ipts_context *ipts);
++int ipts_control_restart(struct ipts_context *ipts);
++int ipts_control_stop(struct ipts_context *ipts);
++
++#endif /* _IPTS_CONTROL_H_ */
++
+diff --git a/drivers/misc/ipts/mei.c b/drivers/misc/ipts/mei.c
+new file mode 100644
+index 000000000000..b74e45c55b62
+--- /dev/null
++++ b/drivers/misc/ipts/mei.c
+@@ -0,0 +1,128 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/mei_cl_bus.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/slab.h>
++
++#include "context.h"
++#include "control.h"
++#include "protocol.h"
++#include "receiver.h"
++#include "uapi.h"
++
++static int ipts_mei_set_dma_mask(struct mei_cl_device *cldev)
++{
++	int ret;
++
++	ret = dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(64));
++	if (!ret)
++		return 0;
++
++	return dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(32));
++}
++
++static int ipts_mei_probe(struct mei_cl_device *cldev,
++		const struct mei_cl_device_id *id)
++{
++	int ret;
++	struct ipts_context *ipts;
++
++	if (ipts_mei_set_dma_mask(cldev)) {
++		dev_err(&cldev->dev, "Failed to set DMA mask for IPTS\n");
++		return -EFAULT;
++	}
++
++	ret = mei_cldev_enable(cldev);
++	if (ret) {
++		dev_err(&cldev->dev, "Failed to enable MEI device: %d\n", ret);
++		return ret;
++	}
++
++	ipts = kzalloc(sizeof(struct ipts_context), GFP_KERNEL);
++	if (!ipts) {
++		mei_cldev_disable(cldev);
++		return -ENOMEM;
++	}
++
++	ipts->cldev = cldev;
++	ipts->dev = &cldev->dev;
++	ipts->status = IPTS_HOST_STATUS_STOPPED;
++
++	mei_cldev_set_drvdata(cldev, ipts);
++	mei_cldev_register_rx_cb(cldev, ipts_receiver_callback);
++
++	return ipts_control_start(ipts);
++}
++
++static int ipts_mei_remove(struct mei_cl_device *cldev)
++{
++	int i;
++	struct ipts_context *ipts = mei_cldev_get_drvdata(cldev);
++
++	ipts_control_stop(ipts);
++
++	for (i = 0; i < 20; i++) {
++		if (ipts->status == IPTS_HOST_STATUS_STOPPED)
++			break;
++
++		msleep(25);
++	}
++
++	mei_cldev_disable(cldev);
++	kfree(ipts);
++
++	return 0;
++}
++
++static struct mei_cl_device_id ipts_mei_device_id_table[] = {
++	{ "", IPTS_MEI_UUID, MEI_CL_VERSION_ANY },
++	{ },
++};
++MODULE_DEVICE_TABLE(mei, ipts_mei_device_id_table);
++
++static struct mei_cl_driver ipts_mei_driver = {
++	.id_table = ipts_mei_device_id_table,
++	.name = "ipts",
++	.probe = ipts_mei_probe,
++	.remove = ipts_mei_remove,
++};
++
++static int __init ipts_mei_init(void)
++{
++	int ret;
++
++	ret = ipts_uapi_init();
++	if (ret)
++		return ret;
++
++	ret = mei_cldev_driver_register(&ipts_mei_driver);
++	if (ret) {
++		ipts_uapi_free();
++		return ret;
++	}
++
++	return 0;
++}
++
++static void __exit ipts_mei_exit(void)
++{
++	mei_cldev_driver_unregister(&ipts_mei_driver);
++	ipts_uapi_free();
++}
++
++MODULE_DESCRIPTION("IPTS touchscreen driver");
++MODULE_AUTHOR("Dorian Stoll <dorian.stoll@tmsp.io>");
++MODULE_LICENSE("GPL");
++
++module_init(ipts_mei_init);
++module_exit(ipts_mei_exit);
++
+diff --git a/drivers/misc/ipts/protocol.h b/drivers/misc/ipts/protocol.h
+new file mode 100644
+index 000000000000..2e179cbb9af3
+--- /dev/null
++++ b/drivers/misc/ipts/protocol.h
+@@ -0,0 +1,319 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_PROTOCOL_H_
++#define _IPTS_PROTOCOL_H_
++
++#include <linux/types.h>
++
++/*
++ * The MEI client ID for IPTS functionality.
++ */
++#define IPTS_MEI_UUID UUID_LE(0x3e8d0870, 0x271a, 0x4208, \
++		0x8e, 0xb5, 0x9a, 0xcb, 0x94, 0x02, 0xae, 0x04)
++
++/*
++ * Queries the device for vendor specific information.
++ *
++ * The command must not contain any payload.
++ * The response will contain struct ipts_get_device_info_rsp as payload.
++ */
++#define IPTS_CMD_GET_DEVICE_INFO 0x00000001
++#define IPTS_RSP_GET_DEVICE_INFO 0x80000001
++
++/*
++ * Sets the mode that IPTS will operate in.
++ *
++ * The command must contain struct ipts_set_mode_cmd as payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_SET_MODE 0x00000002
++#define IPTS_RSP_SET_MODE 0x80000002
++
++/*
++ * Configures the memory buffers that the ME will use
++ * for passing data to the host.
++ *
++ * The command must contain struct ipts_set_mem_window_cmd as payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_SET_MEM_WINDOW 0x00000003
++#define IPTS_RSP_SET_MEM_WINDOW 0x80000003
++
++/*
++ * Signals that the host is ready to receive data to the ME.
++ *
++ * The command must not contain any payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_READY_FOR_DATA 0x00000005
++#define IPTS_RSP_READY_FOR_DATA 0x80000005
++
++/*
++ * Signals that a buffer can be refilled to the ME.
++ *
++ * The command must contain struct ipts_feedback_cmd as payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_FEEDBACK 0x00000006
++#define IPTS_RSP_FEEDBACK 0x80000006
++
++/*
++ * Resets the data flow from the ME to the hosts and
++ * clears the buffers that were set with SET_MEM_WINDOW.
++ *
++ * The command must not contain any payload.
++ * The response will not contain any payload.
++ */
++#define IPTS_CMD_CLEAR_MEM_WINDOW 0x00000007
++#define IPTS_RSP_CLEAR_MEM_WINDOW 0x80000007
++
++/*
++ * Singletouch mode is a fallback that does not support
++ * a stylus or more than one touch input. The data is
++ * received as a HID report with report ID 64.
++ */
++#define IPTS_MODE_SINGLETOUCH 0x0
++
++/*
++ * Multitouch mode is the "proper" operation mode for IPTS. It will
++ * return stylus data as well as capacitive heatmap touch data.
++ * This data needs to be processed in userspace before it can be used.
++ */
++#define IPTS_MODE_MULTITOUCH 0x1
++
++/*
++ * Operation completed successfully.
++ */
++#define IPTS_STATUS_SUCCESS 0x0
++
++/*
++ * Command contained a payload with invalid parameters.
++ */
++#define IPTS_STATUS_INVALID_PARAMS 0x1
++
++/*
++ * ME was unable to validate buffer addresses supplied by the host.
++ */
++#define IPTS_STATUS_ACCESS_DENIED 0x2
++
++/*
++ * Command contained a payload with an invalid size.
++ */
++#define IPTS_STATUS_CMD_SIZE_ERROR 0x3
++
++/*
++ * Buffer addresses have not been set, or the
++ * device is not ready for operation yet.
++ */
++#define IPTS_STATUS_NOT_READY 0x4
++
++/*
++ * There is an outstanding command of the same type. The host must
++ * wait for a response before sending another command of the same type.
++ */
++#define IPTS_STATUS_REQUEST_OUTSTANDING 0x5
++
++/*
++ * No sensor could be found. Either no sensor is connected, it has not
++ * been initialized yet, or the system is improperly configured.
++ */
++#define IPTS_STATUS_NO_SENSOR_FOUND 0x6
++
++/*
++ * Not enough free memory for requested operation.
++ */
++#define IPTS_STATUS_OUT_OF_MEMORY 0x7
++
++/*
++ * An unexpected error occurred.
++ */
++#define IPTS_STATUS_INTERNAL_ERROR 0x8
++
++/*
++ * The sensor has been disabled / reset and must be reinitialized.
++ */
++#define IPTS_STATUS_SENSOR_DISABLED 0x9
++
++/*
++ * Compatibility revision check between sensor and ME failed.
++ * The host can ignore this error and attempt to continue.
++ */
++#define IPTS_STATUS_COMPAT_CHECK_FAIL 0xA
++
++/*
++ * The sensor went through a reset initiated by the ME / the host.
++ */
++#define IPTS_STATUS_SENSOR_EXPECTED_RESET 0xB
++
++/*
++ * The sensor went through an unexpected reset.
++ */
++#define IPTS_STATUS_SENSOR_UNEXPECTED_RESET 0xC
++
++/*
++ * Requested sensor reset failed to complete.
++ */
++#define IPTS_STATUS_RESET_FAILED 0xD
++
++/*
++ * The operation timed out.
++ */
++#define IPTS_STATUS_TIMEOUT 0xE
++
++/*
++ * Test mode pattern did not match expected values.
++ */
++#define IPTS_STATUS_TEST_MODE_FAIL 0xF
++
++/*
++ * The sensor reported fatal error during reset sequence.
++ * Further progress is not possible.
++ */
++#define IPTS_STATUS_SENSOR_FAIL_FATAL 0x10
++
++/*
++ * The sensor reported fatal error during reset sequence.
++ * The host can attempt to continue.
++ */
++#define IPTS_STATUS_SENSOR_FAIL_NONFATAL 0x11
++
++/*
++ * The sensor reported invalid capabilities.
++ */
++#define IPTS_STATUS_INVALID_DEVICE_CAPS 0x12
++
++/*
++ * The command cannot be completed until Quiesce IO flow has completed.
++ */
++#define IPTS_STATUS_QUIESCE_IO_IN_PROGRESS 0x13
++
++/*
++ * The amount of buffers that is used for IPTS
++ */
++#define IPTS_BUFFERS 16
++
++#define IPTS_WORKQUEUE_SIZE 8192
++#define IPTS_WORKQUEUE_ITEM_SIZE 16
++
++/**
++ * struct ipts_set_mode_cmd - Payload for the SET_MODE command.
++ *
++ * @mode: The mode that IPTS should operate in. (IPTS_MODE_*)
++ *
++ * This driver only supports multitouch mode. Singletouch mode
++ * requires a different control flow that is not implemented.
++ */
++struct ipts_set_mode_cmd {
++	u32 mode;
++	u8 reserved[12];
++} __packed;
++
++/**
++ * struct ipts_set_mem_window_cmd - Payload for the SET_MEM_WINDOW command.
++ *
++ * @data_buffer_addr_lower:     Lower 32 bits of the data buffer addresses.
++ * @data_buffer_addr_upper:     Upper 32 bits of the data buffer addresses.
++ * @workqueue_addr_lower:       Lower 32 bits of the workqueue buffer address.
++ * @workqueue_addr_upper:       Upper 32 bits of the workqueue buffer address.
++ * @doorbell_addr_lower:        Lower 32 bits of the doorbell buffer address.
++ * @doorbell_addr_upper:        Upper 32 bits of the doorbell buffer address.
++ * @feedback_buffer_addr_lower: Lower 32 bits of the feedback buffer addresses.
++ * @feedback_buffer_addr_upper: Upper 32 bits of the feedback buffer addresses.
++ * @host2me_addr_lower:         Lower 32 bits of the host2me buffer address.
++ * @host2me_addr_upper:         Upper 32 bits of the host2me buffer address.
++ * @workqueue_item_size:        Constant value. (IPTS_WORKQUEUE_ITEM_SIZE)
++ * @workqueue_size:             Constant value. (IPTS_WORKQUEUE_SIZE)
++ *
++ * The data buffers are buffers that get filled with touch data by the ME.
++ * The doorbell buffer is a u32 that gets incremented by the ME once a data
++ * buffer has been filled with new data.
++ *
++ * The other buffers are required for using GuC submission with binary
++ * firmware. Since support for GuC submission has been dropped from i915,
++ * they are not used anymore, but they need to be allocated to ensure proper
++ * operation.
++ */
++struct ipts_set_mem_window_cmd {
++	u32 data_buffer_addr_lower[IPTS_BUFFERS];
++	u32 data_buffer_addr_upper[IPTS_BUFFERS];
++	u32 workqueue_addr_lower;
++	u32 workqueue_addr_upper;
++	u32 doorbell_addr_lower;
++	u32 doorbell_addr_upper;
++	u32 feedback_buffer_addr_lower[IPTS_BUFFERS];
++	u32 feedback_buffer_addr_upper[IPTS_BUFFERS];
++	u32 host2me_addr_lower;
++	u32 host2me_addr_upper;
++	u32 host2me_size;
++	u8 reserved1;
++	u8 workqueue_item_size;
++	u16 workqueue_size;
++	u8 reserved[32];
++} __packed;
++
++/**
++ * struct ipts_feedback_cmd - Payload for the FEEDBACK command.
++ *
++ * @buffer: The buffer that the ME should refill.
++ */
++struct ipts_feedback_cmd {
++	u32 buffer;
++	u8 reserved[12];
++} __packed;
++
++/**
++ * struct ipts_command - A message sent from the host to the ME.
++ *
++ * @code:    The message code describing the command (IPTS_CMD_*)
++ * @payload: Payload for the command, or 0 if no payload is required.
++ */
++struct ipts_command {
++	u32 code;
++	u8 payload[320];
++} __packed;
++
++/**
++ * struct ipts_device_info - Payload for the GET_DEVICE_INFO response.
++ *
++ * @vendor_id:     Vendor ID of the touch sensor.
++ * @device_id:     Device ID of the touch sensor.
++ * @hw_rev:        Hardware revision of the touch sensor.
++ * @fw_rev:        Firmware revision of the touch sensor.
++ * @data_size:     Required size of one data buffer.
++ * @feedback_size: Required size of one feedback buffer.
++ * @mode:          Current operation mode of IPTS (IPTS_MODE_*)
++ * @max_contacts:  The amount of concurrent touches supported by the sensor.
++ */
++struct ipts_get_device_info_rsp {
++	u16 vendor_id;
++	u16 device_id;
++	u32 hw_rev;
++	u32 fw_rev;
++	u32 data_size;
++	u32 feedback_size;
++	u32 mode;
++	u8 max_contacts;
++	u8 reserved[19];
++} __packed;
++
++/**
++ * struct ipts_response - A message sent from the ME to the host.
++ *
++ * @code:    The message code describing the response (IPTS_RSP_*)
++ * @status:  The status code returned by the command. (IPTS_STATUS_*)
++ * @payload: Payload returned by the command.
++ */
++struct ipts_response {
++	u32 code;
++	u32 status;
++	u8 payload[80];
++} __packed;
++
++#endif /* _IPTS_PROTOCOL_H_ */
++
+diff --git a/drivers/misc/ipts/receiver.c b/drivers/misc/ipts/receiver.c
+new file mode 100644
+index 000000000000..3660a1dcfff9
+--- /dev/null
++++ b/drivers/misc/ipts/receiver.c
+@@ -0,0 +1,183 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/mei_cl_bus.h>
++#include <linux/types.h>
++
++#include "context.h"
++#include "control.h"
++#include "protocol.h"
++#include "resources.h"
++
++static int ipts_receiver_handle_get_device_info(struct ipts_context *ipts,
++		struct ipts_response *rsp)
++{
++	struct ipts_set_mode_cmd cmd;
++
++	memcpy(&ipts->device_info, rsp->payload,
++			sizeof(struct ipts_get_device_info_rsp));
++
++	memset(&cmd, 0, sizeof(struct ipts_set_mode_cmd));
++	cmd.mode = IPTS_MODE_MULTITOUCH;
++
++	return ipts_control_send(ipts, IPTS_CMD_SET_MODE,
++			&cmd, sizeof(struct ipts_set_mode_cmd));
++}
++
++static int ipts_receiver_handle_set_mode(struct ipts_context *ipts)
++{
++	int i, ret;
++	struct ipts_set_mem_window_cmd cmd;
++
++	ret = ipts_resources_alloc(ipts);
++	if (ret) {
++		dev_err(ipts->dev, "Failed to allocate resources\n");
++		return ret;
++	}
++
++	memset(&cmd, 0, sizeof(struct ipts_set_mem_window_cmd));
++
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		cmd.data_buffer_addr_lower[i] =
++			lower_32_bits(ipts->data[i].dma_address);
++
++		cmd.data_buffer_addr_upper[i] =
++			upper_32_bits(ipts->data[i].dma_address);
++
++		cmd.feedback_buffer_addr_lower[i] =
++			lower_32_bits(ipts->feedback[i].dma_address);
++
++		cmd.feedback_buffer_addr_upper[i] =
++			upper_32_bits(ipts->feedback[i].dma_address);
++	}
++
++	cmd.workqueue_addr_lower = lower_32_bits(ipts->workqueue.dma_address);
++	cmd.workqueue_addr_upper = upper_32_bits(ipts->workqueue.dma_address);
++
++	cmd.doorbell_addr_lower = lower_32_bits(ipts->doorbell.dma_address);
++	cmd.doorbell_addr_upper = upper_32_bits(ipts->doorbell.dma_address);
++
++	cmd.host2me_addr_lower = lower_32_bits(ipts->host2me.dma_address);
++	cmd.host2me_addr_upper = upper_32_bits(ipts->host2me.dma_address);
++
++	cmd.workqueue_size = IPTS_WORKQUEUE_SIZE;
++	cmd.workqueue_item_size = IPTS_WORKQUEUE_ITEM_SIZE;
++
++	return ipts_control_send(ipts, IPTS_CMD_SET_MEM_WINDOW,
++			&cmd, sizeof(struct ipts_set_mem_window_cmd));
++}
++
++static int ipts_receiver_handle_set_mem_window(struct ipts_context *ipts)
++{
++	dev_info(ipts->dev, "Device %04hX:%04hX ready\n",
++			ipts->device_info.vendor_id,
++			ipts->device_info.device_id);
++	ipts->status = IPTS_HOST_STATUS_STARTED;
++
++	return ipts_control_send(ipts, IPTS_CMD_READY_FOR_DATA, NULL, 0);
++}
++
++static int ipts_receiver_handle_clear_mem_window(struct ipts_context *ipts)
++{
++	if (ipts->restart)
++		return ipts_control_start(ipts);
++
++	ipts->status = IPTS_HOST_STATUS_STOPPED;
++	return 0;
++}
++
++static bool ipts_receiver_handle_error(struct ipts_context *ipts,
++		struct ipts_response *rsp)
++{
++	bool error;
++
++	switch (rsp->status) {
++	case IPTS_STATUS_SUCCESS:
++	case IPTS_STATUS_COMPAT_CHECK_FAIL:
++		error = false;
++		break;
++	case IPTS_STATUS_INVALID_PARAMS:
++		error = rsp->code != IPTS_RSP_FEEDBACK;
++		break;
++	case IPTS_STATUS_SENSOR_DISABLED:
++		error = ipts->status != IPTS_HOST_STATUS_STOPPING;
++		break;
++	default:
++		error = true;
++		break;
++	}
++
++	if (!error)
++		return false;
++
++	dev_err(ipts->dev, "Command 0x%08x failed: %d\n",
++			rsp->code, rsp->status);
++
++	if (rsp->status == IPTS_STATUS_SENSOR_UNEXPECTED_RESET) {
++		dev_err(ipts->dev, "Sensor was reset\n");
++
++		if (ipts_control_restart(ipts))
++			dev_err(ipts->dev, "Failed to restart IPTS\n");
++	}
++
++	return true;
++}
++
++static void ipts_receiver_handle_response(struct ipts_context *ipts,
++		struct ipts_response *rsp)
++{
++	int ret;
++
++	if (ipts_receiver_handle_error(ipts, rsp))
++		return;
++
++	switch (rsp->code) {
++	case IPTS_RSP_GET_DEVICE_INFO:
++		ret = ipts_receiver_handle_get_device_info(ipts, rsp);
++		break;
++	case IPTS_RSP_SET_MODE:
++		ret = ipts_receiver_handle_set_mode(ipts);
++		break;
++	case IPTS_RSP_SET_MEM_WINDOW:
++		ret = ipts_receiver_handle_set_mem_window(ipts);
++		break;
++	case IPTS_RSP_CLEAR_MEM_WINDOW:
++		ret = ipts_receiver_handle_clear_mem_window(ipts);
++		break;
++	default:
++		ret = 0;
++		break;
++	}
++
++	if (!ret)
++		return;
++
++	dev_err(ipts->dev, "Error while handling response 0x%08x: %d\n",
++			rsp->code, ret);
++
++	if (ipts_control_stop(ipts))
++		dev_err(ipts->dev, "Failed to stop IPTS\n");
++}
++
++void ipts_receiver_callback(struct mei_cl_device *cldev)
++{
++	int ret;
++	struct ipts_response rsp;
++	struct ipts_context *ipts;
++
++	ipts = mei_cldev_get_drvdata(cldev);
++
++	ret = mei_cldev_recv(cldev, (u8 *)&rsp, sizeof(struct ipts_response));
++	if (ret <= 0) {
++		dev_err(ipts->dev, "Error while reading response: %d\n", ret);
++		return;
++	}
++
++	ipts_receiver_handle_response(ipts, &rsp);
++}
++
+diff --git a/drivers/misc/ipts/receiver.h b/drivers/misc/ipts/receiver.h
+new file mode 100644
+index 000000000000..c061d57a9320
+--- /dev/null
++++ b/drivers/misc/ipts/receiver.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_RECEIVER_H_
++#define _IPTS_RECEIVER_H_
++
++#include <linux/mei_cl_bus.h>
++
++void ipts_receiver_callback(struct mei_cl_device *cldev);
++
++#endif /* _IPTS_RECEIVER_H_ */
++
+diff --git a/drivers/misc/ipts/resources.c b/drivers/misc/ipts/resources.c
+new file mode 100644
+index 000000000000..bcf4dd8d7e95
+--- /dev/null
++++ b/drivers/misc/ipts/resources.c
+@@ -0,0 +1,134 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/dma-mapping.h>
++
++#include "context.h"
++
++void ipts_resources_free(struct ipts_context *ipts)
++{
++	int i;
++	struct ipts_buffer_info *buffers;
++
++	u32 data_buffer_size = ipts->device_info.data_size;
++	u32 feedback_buffer_size = ipts->device_info.feedback_size;
++
++	buffers = ipts->data;
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		if (!buffers[i].address)
++			continue;
++
++		dma_free_coherent(ipts->dev, data_buffer_size,
++				buffers[i].address, buffers[i].dma_address);
++
++		buffers[i].address = NULL;
++		buffers[i].dma_address = 0;
++	}
++
++	buffers = ipts->feedback;
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		if (!buffers[i].address)
++			continue;
++
++		dma_free_coherent(ipts->dev, feedback_buffer_size,
++				buffers[i].address, buffers[i].dma_address);
++
++		buffers[i].address = NULL;
++		buffers[i].dma_address = 0;
++	}
++
++	if (ipts->doorbell.address) {
++		dma_free_coherent(ipts->dev, sizeof(u32),
++				ipts->doorbell.address,
++				ipts->doorbell.dma_address);
++
++		ipts->doorbell.address = NULL;
++		ipts->doorbell.dma_address = 0;
++	}
++
++	if (ipts->workqueue.address) {
++		dma_free_coherent(ipts->dev, sizeof(u32),
++				ipts->workqueue.address,
++				ipts->workqueue.dma_address);
++
++		ipts->workqueue.address = NULL;
++		ipts->workqueue.dma_address = 0;
++	}
++
++	if (ipts->host2me.address) {
++		dma_free_coherent(ipts->dev, feedback_buffer_size,
++				ipts->host2me.address,
++				ipts->host2me.dma_address);
++
++		ipts->host2me.address = NULL;
++		ipts->host2me.dma_address = 0;
++	}
++}
++
++int ipts_resources_alloc(struct ipts_context *ipts)
++{
++	int i;
++	struct ipts_buffer_info *buffers;
++
++	u32 data_buffer_size = ipts->device_info.data_size;
++	u32 feedback_buffer_size = ipts->device_info.feedback_size;
++
++	buffers = ipts->data;
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		buffers[i].address = dma_alloc_coherent(ipts->dev,
++				data_buffer_size,
++				&buffers[i].dma_address,
++				GFP_KERNEL);
++
++		if (!buffers[i].address)
++			goto release_resources;
++	}
++
++	buffers = ipts->feedback;
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		buffers[i].address = dma_alloc_coherent(ipts->dev,
++				feedback_buffer_size,
++				&buffers[i].dma_address,
++				GFP_KERNEL);
++
++		if (!buffers[i].address)
++			goto release_resources;
++	}
++
++	ipts->doorbell.address = dma_alloc_coherent(ipts->dev,
++			sizeof(u32),
++			&ipts->doorbell.dma_address,
++			GFP_KERNEL);
++
++	if (!ipts->doorbell.address)
++		goto release_resources;
++
++	ipts->workqueue.address = dma_alloc_coherent(ipts->dev,
++			sizeof(u32),
++			&ipts->workqueue.dma_address,
++			GFP_KERNEL);
++
++	if (!ipts->workqueue.address)
++		goto release_resources;
++
++	ipts->host2me.address = dma_alloc_coherent(ipts->dev,
++			feedback_buffer_size,
++			&ipts->host2me.dma_address,
++			GFP_KERNEL);
++
++	if (!ipts->host2me.address)
++		goto release_resources;
++
++	return 0;
++
++release_resources:
++
++	ipts_resources_free(ipts);
++	return -ENOMEM;
++}
++
+diff --git a/drivers/misc/ipts/resources.h b/drivers/misc/ipts/resources.h
+new file mode 100644
+index 000000000000..8f55af7aae0f
+--- /dev/null
++++ b/drivers/misc/ipts/resources.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_RESOURCES_H_
++#define _IPTS_RESOURCES_H_
++
++#include "context.h"
++
++int ipts_resources_alloc(struct ipts_context *ipts);
++void ipts_resources_free(struct ipts_context *ipts);
++
++#endif /* _IPTS_RESOURCES_H_ */
++
+diff --git a/drivers/misc/ipts/uapi.c b/drivers/misc/ipts/uapi.c
+new file mode 100644
+index 000000000000..1b59dbc9a1ad
+--- /dev/null
++++ b/drivers/misc/ipts/uapi.c
+@@ -0,0 +1,190 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/delay.h>
++#include <linux/uaccess.h>
++#include <linux/types.h>
++
++#include "context.h"
++#include "control.h"
++#include "protocol.h"
++#include "uapi.h"
++
++struct ipts_uapi uapi;
++
++static ssize_t ipts_uapi_read(struct file *file, char __user *buf,
++		size_t count, loff_t *offset)
++{
++	int buffer;
++	int maxbytes;
++	struct ipts_context *ipts = uapi.ipts;
++
++	buffer = MINOR(file->f_path.dentry->d_inode->i_rdev);
++
++	if (!ipts || ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	maxbytes = ipts->device_info.data_size - *offset;
++	if (maxbytes <= 0 || count > maxbytes)
++		return -EINVAL;
++
++	if (copy_to_user(buf, ipts->data[buffer].address + *offset, count))
++		return -EFAULT;
++
++	return count;
++}
++
++static long ipts_uapi_ioctl_get_device_ready(struct ipts_context *ipts,
++		unsigned long arg)
++{
++	void __user *buffer = (void __user *)arg;
++	u8 ready = ipts->status == IPTS_HOST_STATUS_STARTED;
++
++	if (copy_to_user(buffer, &ready, sizeof(u8)))
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl_get_device_info(struct ipts_context *ipts,
++		unsigned long arg)
++{
++	struct ipts_device_info info;
++	void __user *buffer = (void __user *)arg;
++
++	if (ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	info.vendor = ipts->device_info.vendor_id;
++	info.product = ipts->device_info.device_id;
++	info.version = ipts->device_info.fw_rev;
++	info.buffer_size = ipts->device_info.data_size;
++	info.max_contacts = ipts->device_info.max_contacts;
++
++	if (copy_to_user(buffer, &info, sizeof(struct ipts_device_info)))
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl_get_doorbell(struct ipts_context *ipts,
++		unsigned long arg)
++{
++	void __user *buffer = (void __user *)arg;
++
++	if (ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	if (copy_to_user(buffer, ipts->doorbell.address, sizeof(u32)))
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl_send_feedback(struct ipts_context *ipts,
++		struct file *file)
++{
++	int ret;
++	struct ipts_feedback_cmd cmd;
++
++	if (ipts->status != IPTS_HOST_STATUS_STARTED)
++		return -ENODEV;
++
++	memset(&cmd, 0, sizeof(struct ipts_feedback_cmd));
++	cmd.buffer = MINOR(file->f_path.dentry->d_inode->i_rdev);
++
++	ret = ipts_control_send(ipts, IPTS_CMD_FEEDBACK,
++				&cmd, sizeof(struct ipts_feedback_cmd));
++
++	if (ret)
++		return -EFAULT;
++
++	return 0;
++}
++
++static long ipts_uapi_ioctl(struct file *file, unsigned int cmd,
++		unsigned long arg)
++{
++	struct ipts_context *ipts = uapi.ipts;
++
++	if (!ipts)
++		return -ENODEV;
++
++	switch (cmd) {
++	case IPTS_IOCTL_GET_DEVICE_READY:
++		return ipts_uapi_ioctl_get_device_ready(ipts, arg);
++	case IPTS_IOCTL_GET_DEVICE_INFO:
++		return ipts_uapi_ioctl_get_device_info(ipts, arg);
++	case IPTS_IOCTL_GET_DOORBELL:
++		return ipts_uapi_ioctl_get_doorbell(ipts, arg);
++	case IPTS_IOCTL_SEND_FEEDBACK:
++		return ipts_uapi_ioctl_send_feedback(ipts, file);
++	default:
++		return -ENOTTY;
++	}
++}
++
++static const struct file_operations ipts_uapi_fops = {
++	.owner = THIS_MODULE,
++	.read = ipts_uapi_read,
++	.unlocked_ioctl = ipts_uapi_ioctl,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = ipts_uapi_ioctl,
++#endif
++};
++
++void ipts_uapi_link(struct ipts_context *ipts)
++{
++	uapi.ipts = ipts;
++}
++
++void ipts_uapi_unlink(void)
++{
++	uapi.ipts = NULL;
++}
++
++int ipts_uapi_init(void)
++{
++	int i, major;
++
++	alloc_chrdev_region(&uapi.dev, 0, IPTS_BUFFERS, "ipts");
++	uapi.class = class_create(THIS_MODULE, "ipts");
++
++	major = MAJOR(uapi.dev);
++
++	cdev_init(&uapi.cdev, &ipts_uapi_fops);
++	uapi.cdev.owner = THIS_MODULE;
++	cdev_add(&uapi.cdev, MKDEV(major, 0), IPTS_BUFFERS);
++
++	for (i = 0; i < IPTS_BUFFERS; i++) {
++		device_create(uapi.class, NULL,
++				MKDEV(major, i), NULL, "ipts/%d", i);
++	}
++
++	return 0;
++}
++
++void ipts_uapi_free(void)
++{
++	int i;
++	int major;
++
++	major = MAJOR(uapi.dev);
++
++	for (i = 0; i < IPTS_BUFFERS; i++)
++		device_destroy(uapi.class, MKDEV(major, i));
++
++	cdev_del(&uapi.cdev);
++
++	unregister_chrdev_region(uapi.dev, IPTS_BUFFERS);
++	class_destroy(uapi.class);
++}
++
+diff --git a/drivers/misc/ipts/uapi.h b/drivers/misc/ipts/uapi.h
+new file mode 100644
+index 000000000000..4c667bb6a7f2
+--- /dev/null
++++ b/drivers/misc/ipts/uapi.h
+@@ -0,0 +1,47 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef _IPTS_UAPI_H_
++#define _IPTS_UAPI_H_
++
++#include <linux/types.h>
++
++#include "context.h"
++
++struct ipts_uapi {
++	dev_t dev;
++	struct class *class;
++	struct cdev cdev;
++
++	struct ipts_context *ipts;
++};
++
++struct ipts_device_info {
++	__u16 vendor;
++	__u16 product;
++	__u32 version;
++	__u32 buffer_size;
++	__u8 max_contacts;
++
++	/* For future expansion */
++	__u8 reserved[19];
++};
++
++#define IPTS_IOCTL_GET_DEVICE_READY _IOR(0x86, 0x01, __u8)
++#define IPTS_IOCTL_GET_DEVICE_INFO  _IOR(0x86, 0x02, struct ipts_device_info)
++#define IPTS_IOCTL_GET_DOORBELL     _IOR(0x86, 0x03, __u32)
++#define IPTS_IOCTL_SEND_FEEDBACK    _IO(0x86, 0x04)
++
++void ipts_uapi_link(struct ipts_context *ipts);
++void ipts_uapi_unlink(void);
++
++int ipts_uapi_init(void);
++void ipts_uapi_free(void);
++
++#endif /* _IPTS_UAPI_H_ */
++
+-- 
+2.28.0
+

+ 387 - 0
patches/5.9/0004-surface-gpe.patch

@@ -0,0 +1,387 @@
+From 1e39f55f2a8c3c816fac769e915a898d3f7ed87a Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 16 Aug 2020 23:39:56 +0200
+Subject: [PATCH] platform/x86: Add Driver to set up lid GPEs on MS Surface
+ device
+
+Conventionally, wake-up events for a specific device, in our case the
+lid device, are managed via the ACPI _PRW field. While this does not
+seem strictly necessary based on ACPI spec, the kernel disables GPE
+wakeups to avoid non-wakeup interrupts preventing suspend by default and
+only enables GPEs associated via the _PRW field with a wake-up capable
+device. This behavior has been introduced in commit
+
+    f941d3e41da7f86bdb9dcc1977c2bcc6b89bfe47
+    ACPI: EC / PM: Disable non-wakeup GPEs for suspend-to-idle
+
+and is described in more detail in its commit message.
+
+Unfortunately, on MS Surface devices, there is no _PRW field present on
+the lid device, thus no GPE is associated with it, and therefore the GPE
+responsible for sending the status-change notification to the lid gets
+disabled during suspend, making it impossible to wake the device via the
+lid.
+
+This patch introduces a pseudo-device and respective driver which, based
+on some DMI matching, mark the corresponding GPE of the lid device for
+wake and enable it during suspend. The behavior of this driver models
+the behavior of the ACPI/PM core for normal wakeup GPEs, properly
+declared via the _PRW field.
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Patchset: surface-gpe
+---
+ drivers/platform/x86/Kconfig       |   9 +
+ drivers/platform/x86/Makefile      |   1 +
+ drivers/platform/x86/surface_gpe.c | 307 +++++++++++++++++++++++++++++
+ 3 files changed, 317 insertions(+)
+ create mode 100644 drivers/platform/x86/surface_gpe.c
+
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 0d91d136bc3b..d9d3c2149e8b 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -901,6 +901,15 @@ config SURFACE_PRO3_BUTTON
+ 	help
+ 	  This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
+ 
++config SURFACE_GPE
++	tristate "Surface GPE/Lid Driver"
++	depends on ACPI
++	help
++	  This driver marks the GPEs related to the ACPI lid device found on
++	  Microsoft Surface devices as wakeup sources and prepares them
++	  accordingly. It is required on those devices to allow wake-ups from
++	  suspend by opening the lid.
++
+ config MSI_LAPTOP
+ 	tristate "MSI Laptop Extras"
+ 	depends on ACPI
+diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
+index 5f823f7eff45..c0d1c753eb3c 100644
+--- a/drivers/platform/x86/Makefile
++++ b/drivers/platform/x86/Makefile
+@@ -86,6 +86,7 @@ obj-$(CONFIG_SURFACE3_WMI)		+= surface3-wmi.o
+ obj-$(CONFIG_SURFACE_3_BUTTON)		+= surface3_button.o
+ obj-$(CONFIG_SURFACE_3_POWER_OPREGION)	+= surface3_power.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
++obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
+ 
+ # MSI
+ obj-$(CONFIG_MSI_LAPTOP)	+= msi-laptop.o
+diff --git a/drivers/platform/x86/surface_gpe.c b/drivers/platform/x86/surface_gpe.c
+new file mode 100644
+index 000000000000..2857e3862ca4
+--- /dev/null
++++ b/drivers/platform/x86/surface_gpe.c
+@@ -0,0 +1,307 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Surface GPE/Lid driver to enable wakeup from suspend via the lid by
++ * properly configuring the respective GPEs. Required for wakeup via lid on
++ * newer Intel-based Microsoft Surface devices.
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/acpi.h>
++#include <linux/dmi.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++
++/*
++ * Note: The GPE numbers for the lid devices found below have been obtained
++ *       from ACPI/the DSDT table, specifically from the GPE handler for the
++ *       lid.
++ */
++
++static const struct property_entry lid_device_props_l17[] = {
++	PROPERTY_ENTRY_U32("gpe", 0x17),
++	{},
++};
++
++static const struct property_entry lid_device_props_l4D[] = {
++	PROPERTY_ENTRY_U32("gpe", 0x4D),
++	{},
++};
++
++static const struct property_entry lid_device_props_l4F[] = {
++	PROPERTY_ENTRY_U32("gpe", 0x4F),
++	{},
++};
++
++static const struct property_entry lid_device_props_l57[] = {
++	PROPERTY_ENTRY_U32("gpe", 0x57),
++	{},
++};
++
++/*
++ * Note: When changing this, don't forget to check that the MODULE_ALIAS below
++ *       still fits.
++ */
++static const struct dmi_system_id dmi_lid_device_table[] = {
++	{
++		.ident = "Surface Pro 4",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
++		},
++		.driver_data = (void *)lid_device_props_l17,
++	},
++	{
++		.ident = "Surface Pro 5",
++		.matches = {
++			/*
++			 * We match for SKU here due to generic product name
++			 * "Surface Pro".
++			 */
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
++		},
++		.driver_data = (void *)lid_device_props_l4F,
++	},
++	{
++		.ident = "Surface Pro 5 (LTE)",
++		.matches = {
++			/*
++			 * We match for SKU here due to generic product name
++			 * "Surface Pro"
++			 */
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
++		},
++		.driver_data = (void *)lid_device_props_l4F,
++	},
++	{
++		.ident = "Surface Pro 6",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
++		},
++		.driver_data = (void *)lid_device_props_l4F,
++	},
++	{
++		.ident = "Surface Pro 7",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 7"),
++		},
++		.driver_data = (void *)lid_device_props_l4D,
++	},
++	{
++		.ident = "Surface Book 1",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
++		},
++		.driver_data = (void *)lid_device_props_l17,
++	},
++	{
++		.ident = "Surface Book 2",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
++		},
++		.driver_data = (void *)lid_device_props_l17,
++	},
++	{
++		.ident = "Surface Book 3",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 3"),
++		},
++		.driver_data = (void *)lid_device_props_l4D,
++	},
++	{
++		.ident = "Surface Laptop 1",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
++		},
++		.driver_data = (void *)lid_device_props_l57,
++	},
++	{
++		.ident = "Surface Laptop 2",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
++		},
++		.driver_data = (void *)lid_device_props_l57,
++	},
++	{
++		.ident = "Surface Laptop 3 (Intel 13\")",
++		.matches = {
++			/*
++			 * We match for SKU here due to different variants: The
++			 * AMD (15") version does not rely on GPEs.
++			 */
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1867:1868"),
++		},
++		.driver_data = (void *)lid_device_props_l4D,
++	},
++	{ }
++};
++
++struct surface_lid_device {
++	u32 gpe_number;
++};
++
++static int surface_lid_enable_wakeup(struct device *dev, bool enable)
++{
++	const struct surface_lid_device *lid = dev_get_drvdata(dev);
++	int action = enable ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE;
++	acpi_status status;
++
++	status = acpi_set_gpe_wake_mask(NULL, lid->gpe_number, action);
++	if (ACPI_FAILURE(status)) {
++		dev_err(dev, "failed to set GPE wake mask: %s\n",
++			acpi_format_exception(status));
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int surface_gpe_suspend(struct device *dev)
++{
++	return surface_lid_enable_wakeup(dev, true);
++}
++
++static int surface_gpe_resume(struct device *dev)
++{
++	return surface_lid_enable_wakeup(dev, false);
++}
++
++static SIMPLE_DEV_PM_OPS(surface_gpe_pm, surface_gpe_suspend, surface_gpe_resume);
++
++static int surface_gpe_probe(struct platform_device *pdev)
++{
++	struct surface_lid_device *lid;
++	u32 gpe_number;
++	acpi_status status;
++	int ret;
++
++	ret = device_property_read_u32(&pdev->dev, "gpe", &gpe_number);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to read 'gpe' property: %d\n", ret);
++		return ret;
++	}
++
++	lid = devm_kzalloc(&pdev->dev, sizeof(*lid), GFP_KERNEL);
++	if (!lid)
++		return -ENOMEM;
++
++	lid->gpe_number = gpe_number;
++	platform_set_drvdata(pdev, lid);
++
++	status = acpi_mark_gpe_for_wake(NULL, gpe_number);
++	if (ACPI_FAILURE(status)) {
++		dev_err(&pdev->dev, "failed to mark GPE for wake: %s\n",
++			acpi_format_exception(status));
++		return -EINVAL;
++	}
++
++	status = acpi_enable_gpe(NULL, gpe_number);
++	if (ACPI_FAILURE(status)) {
++		dev_err(&pdev->dev, "failed to enable GPE: %s\n",
++			acpi_format_exception(status));
++		return -EINVAL;
++	}
++
++	ret = surface_lid_enable_wakeup(&pdev->dev, false);
++	if (ret)
++		acpi_disable_gpe(NULL, gpe_number);
++
++	return ret;
++}
++
++static int surface_gpe_remove(struct platform_device *pdev)
++{
++	struct surface_lid_device *lid = dev_get_drvdata(&pdev->dev);
++
++	/* restore default behavior without this module */
++	surface_lid_enable_wakeup(&pdev->dev, false);
++	acpi_disable_gpe(NULL, lid->gpe_number);
++
++	return 0;
++}
++
++static struct platform_driver surface_gpe_driver = {
++	.probe = surface_gpe_probe,
++	.remove = surface_gpe_remove,
++	.driver = {
++		.name = "surface_gpe",
++		.pm = &surface_gpe_pm,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++static struct platform_device *surface_gpe_device;
++
++static int __init surface_gpe_init(void)
++{
++	const struct dmi_system_id *match;
++	struct platform_device *pdev;
++	struct fwnode_handle *fwnode;
++	int status;
++
++	match = dmi_first_match(dmi_lid_device_table);
++	if (!match) {
++		pr_info("no compatible Microsoft Surface device found, exiting\n");
++		return -ENODEV;
++	}
++
++	status = platform_driver_register(&surface_gpe_driver);
++	if (status)
++		return status;
++
++	fwnode = fwnode_create_software_node(match->driver_data, NULL);
++	if (IS_ERR(fwnode)) {
++		status = PTR_ERR(fwnode);
++		goto err_node;
++	}
++
++	pdev = platform_device_alloc("surface_gpe", PLATFORM_DEVID_NONE);
++	if (!pdev) {
++		status = -ENOMEM;
++		goto err_alloc;
++	}
++
++	pdev->dev.fwnode = fwnode;
++
++	status = platform_device_add(pdev);
++	if (status)
++		goto err_add;
++
++	surface_gpe_device = pdev;
++	return 0;
++
++err_add:
++	platform_device_put(pdev);
++err_alloc:
++	fwnode_remove_software_node(fwnode);
++err_node:
++	platform_driver_unregister(&surface_gpe_driver);
++	return status;
++}
++module_init(surface_gpe_init);
++
++static void __exit surface_gpe_exit(void)
++{
++	struct fwnode_handle *fwnode = surface_gpe_device->dev.fwnode;
++
++	platform_device_unregister(surface_gpe_device);
++	platform_driver_unregister(&surface_gpe_driver);
++	fwnode_remove_software_node(fwnode);
++}
++module_exit(surface_gpe_exit);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Surface GPE/Lid Driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("dmi:*:svnMicrosoftCorporation:pnSurface*:*");
+-- 
+2.28.0
+

+ 334 - 0
patches/5.9/0005-surface-sam-over-hid.patch

@@ -0,0 +1,334 @@
+From 824f294b006f50b558734a7d0240e132dd613ca9 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sat, 25 Jul 2020 17:19:53 +0200
+Subject: [PATCH] i2c: acpi: Implement RawBytes read access
+
+Microsoft Surface Pro 4 and Book 1 devices access the MSHW0030 I2C
+device via a generic serial bus operation region and RawBytes read
+access. On the Surface Book 1, this access is required to turn on (and
+off) the discrete GPU.
+
+Multiple things are to note here:
+
+a) The RawBytes access is device/driver dependent. The ACPI
+   specification states:
+
+   > Raw accesses assume that the writer has knowledge of the bus that
+   > the access is made over and the device that is being accessed. The
+   > protocol may only ensure that the buffer is transmitted to the
+   > appropriate driver, but the driver must be able to interpret the
+   > buffer to communicate to a register.
+
+   Thus this implementation may likely not work on other devices
+   accessing I2C via the RawBytes accessor type.
+
+b) The MSHW0030 I2C device is an HID-over-I2C device which seems to
+   serve multiple functions:
+
+   1. It is the main access point for the legacy-type Surface Aggregator
+      Module (also referred to as SAM-over-HID, as opposed to the newer
+      SAM-over-SSH/UART). It has currently not been determined on how
+      support for the legacy SAM should be implemented. Likely via a
+      custom HID driver.
+
+   2. It seems to serve as the HID device for the Integrated Sensor Hub.
+      This might complicate matters with regards to implementing a
+      SAM-over-HID driver required by legacy SAM.
+
+In light of this, the simplest approach has been chosen for now.
+However, it may make more sense regarding breakage and compatibility to
+either provide functionality for replacing or enhancing the default
+operation region handler via some additional API functions, or even to
+completely blacklist MSHW0030 from the I2C core and provide a custom
+driver for it.
+
+Replacing/enhancing the default operation region handler would, however,
+either require some sort of secondary driver and access point for it,
+from which the new API functions would be called and the new handler
+(part) would be installed, or hard-coding them via some sort of
+quirk-like interface into the I2C core.
+
+Patchset: surface-sam-over-hid
+---
+ drivers/i2c/i2c-core-acpi.c | 35 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 35 insertions(+)
+
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index e627d7b2790f..8820131da748 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -564,6 +564,28 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+ 	return (ret == 1) ? 0 : -EIO;
+ }
+ 
++static int acpi_gsb_i2c_write_raw_bytes(struct i2c_client *client,
++		u8 *data, u8 data_len)
++{
++	struct i2c_msg msgs[1];
++	int ret = AE_OK;
++
++	msgs[0].addr = client->addr;
++	msgs[0].flags = client->flags;
++	msgs[0].len = data_len + 1;
++	msgs[0].buf = data;
++
++	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++
++	if (ret < 0) {
++		dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret);
++		return ret;
++	}
++
++	/* 1 transfer must have completed successfully */
++	return (ret == 1) ? 0 : -EIO;
++}
++
+ static acpi_status
+ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 			u32 bits, u64 *value64,
+@@ -665,6 +687,19 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 		}
+ 		break;
+ 
++	case ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES:
++		if (action == ACPI_READ) {
++			dev_warn(&adapter->dev,
++				 "protocol 0x%02x not supported for client 0x%02x\n",
++				 accessor_type, client->addr);
++			ret = AE_BAD_PARAMETER;
++			goto err;
++		} else {
++			status = acpi_gsb_i2c_write_raw_bytes(client,
++					gsb->data, info->access_length);
++		}
++		break;
++
+ 	default:
+ 		dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n",
+ 			 accessor_type, client->addr);
+-- 
+2.28.0
+
+From e47b3deefb0fb2c24b65d7b9271c15c0abf4491e Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Sun, 6 Sep 2020 04:01:19 +0200
+Subject: [PATCH] platform/x86: Add driver for Surface Book 1 dGPU switch
+
+Add driver exposing the discrete GPU power-switch of the Microsoft
+Surface Book 1 to user-space.
+
+On the Surface Book 1, the dGPU power is controlled via the Surface
+System Aggregator Module (SAM). The specific SAM-over-HID command for
+this is exposed via ACPI. This module provides a simple driver exposing
+the ACPI call via a sysfs parameter to user-space, so that users can
+easily power-on/-off the dGPU.
+
+Patchset: surface-sam-over-hid
+---
+ drivers/platform/x86/Kconfig       |   7 ++
+ drivers/platform/x86/Makefile      |   1 +
+ drivers/platform/x86/sb1_dgpu_sw.c | 162 +++++++++++++++++++++++++++++
+ 3 files changed, 170 insertions(+)
+ create mode 100644 drivers/platform/x86/sb1_dgpu_sw.c
+
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index d9d3c2149e8b..a9b12f4dcbd1 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -910,6 +910,13 @@ config SURFACE_GPE
+ 	  accordingly. It is required on those devices to allow wake-ups from
+ 	  suspend by opening the lid.
+ 
++config SURFACE_BOOK1_DGPU_SWITCH
++	tristate "Surface Book 1 dGPU Switch Driver"
++	depends on ACPI && SYSFS
++	help
++	  This driver provides a sysfs switch to set the power-state of the
++	  discrete GPU found on the Microsoft Surface Book 1.
++
+ config MSI_LAPTOP
+ 	tristate "MSI Laptop Extras"
+ 	depends on ACPI
+diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
+index c0d1c753eb3c..562d83940e7b 100644
+--- a/drivers/platform/x86/Makefile
++++ b/drivers/platform/x86/Makefile
+@@ -87,6 +87,7 @@ obj-$(CONFIG_SURFACE_3_BUTTON)		+= surface3_button.o
+ obj-$(CONFIG_SURFACE_3_POWER_OPREGION)	+= surface3_power.o
+ obj-$(CONFIG_SURFACE_PRO3_BUTTON)	+= surfacepro3_button.o
+ obj-$(CONFIG_SURFACE_GPE)		+= surface_gpe.o
++obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH)	+= sb1_dgpu_sw.o
+ 
+ # MSI
+ obj-$(CONFIG_MSI_LAPTOP)	+= msi-laptop.o
+diff --git a/drivers/platform/x86/sb1_dgpu_sw.c b/drivers/platform/x86/sb1_dgpu_sw.c
+new file mode 100644
+index 000000000000..8c66ed5110fd
+--- /dev/null
++++ b/drivers/platform/x86/sb1_dgpu_sw.c
+@@ -0,0 +1,162 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/acpi.h>
++#include <linux/platform_device.h>
++
++
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
++#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
++
++
++static const guid_t dgpu_sw_guid = GUID_INIT(0x6fd05c69, 0xcde3, 0x49f4,
++	0x95, 0xed, 0xab, 0x16, 0x65, 0x49, 0x80, 0x35);
++
++#define DGPUSW_ACPI_PATH_DSM	"\\_SB_.PCI0.LPCB.EC0_.VGBI"
++#define DGPUSW_ACPI_PATH_HGON	"\\_SB_.PCI0.RP05.HGON"
++#define DGPUSW_ACPI_PATH_HGOF	"\\_SB_.PCI0.RP05.HGOF"
++
++
++static int sb1_dgpu_sw_dsmcall(void)
++{
++	union acpi_object *ret;
++	acpi_handle handle;
++	acpi_status status;
++
++	status = acpi_get_handle(NULL, DGPUSW_ACPI_PATH_DSM, &handle);
++	if (status)
++		return -EINVAL;
++
++	ret = acpi_evaluate_dsm_typed(handle, &dgpu_sw_guid, 1, 1, NULL, ACPI_TYPE_BUFFER);
++	if (!ret)
++		return -EINVAL;
++
++	ACPI_FREE(ret);
++	return 0;
++}
++
++static int sb1_dgpu_sw_hgon(void)
++{
++	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
++	acpi_status status;
++
++	status = acpi_evaluate_object(NULL, DGPUSW_ACPI_PATH_HGON, NULL, &buf);
++	if (status) {
++		pr_err("failed to run HGON: %d\n", status);
++		return -EINVAL;
++	}
++
++	if (buf.pointer)
++		ACPI_FREE(buf.pointer);
++
++	pr_info("turned-on dGPU via HGON\n");
++	return 0;
++}
++
++static int sb1_dgpu_sw_hgof(void)
++{
++	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
++	acpi_status status;
++
++	status = acpi_evaluate_object(NULL, DGPUSW_ACPI_PATH_HGOF, NULL, &buf);
++	if (status) {
++		pr_err("failed to run HGOF: %d\n", status);
++		return -EINVAL;
++	}
++
++	if (buf.pointer)
++		ACPI_FREE(buf.pointer);
++
++	pr_info("turned-off dGPU via HGOF\n");
++	return 0;
++}
++
++
++static ssize_t dgpu_dsmcall_store(struct device *dev, struct device_attribute *attr,
++				  const char *buf, size_t len)
++{
++	int status, value;
++
++	status = kstrtoint(buf, 0, &value);
++	if (status < 0)
++		return status;
++
++	if (value != 1)
++		return -EINVAL;
++
++	status = sb1_dgpu_sw_dsmcall();
++
++	return status < 0 ? status : len;
++}
++
++static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr,
++				const char *buf, size_t len)
++{
++	bool power;
++	int status;
++
++	status = kstrtobool(buf, &power);
++	if (status < 0)
++		return status;
++
++	if (power)
++		status = sb1_dgpu_sw_hgon();
++	else
++		status = sb1_dgpu_sw_hgof();
++
++	return status < 0 ? status : len;
++}
++
++static DEVICE_ATTR_WO(dgpu_dsmcall);
++static DEVICE_ATTR_WO(dgpu_power);
++
++static struct attribute *sb1_dgpu_sw_attrs[] = {
++	&dev_attr_dgpu_dsmcall.attr,
++	&dev_attr_dgpu_power.attr,
++	NULL,
++};
++
++static const struct attribute_group sb1_dgpu_sw_attr_group = {
++	.attrs = sb1_dgpu_sw_attrs,
++};
++
++
++static int sb1_dgpu_sw_probe(struct platform_device *pdev)
++{
++	return sysfs_create_group(&pdev->dev.kobj, &sb1_dgpu_sw_attr_group);
++}
++
++static int sb1_dgpu_sw_remove(struct platform_device *pdev)
++{
++	sysfs_remove_group(&pdev->dev.kobj, &sb1_dgpu_sw_attr_group);
++	return 0;
++}
++
++/*
++ * The dGPU power seems to be actually handled by MSHW0040. However, that is
++ * also the power-/volume-button device with a mainline driver. So let's use
++ * MSHW0041 instead for now, which seems to be the LTCH (latch/DTX) device.
++ */
++static const struct acpi_device_id sb1_dgpu_sw_match[] = {
++	{ "MSHW0041", },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, sb1_dgpu_sw_match);
++
++static struct platform_driver sb1_dgpu_sw = {
++	.probe = sb1_dgpu_sw_probe,
++	.remove = sb1_dgpu_sw_remove,
++	.driver = {
++		.name = "sb1_dgpu_sw",
++		.acpi_match_table = sb1_dgpu_sw_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(sb1_dgpu_sw);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Discrete GPU Power-Switch for Surface Book 1");
++MODULE_LICENSE("GPL");
+-- 
+2.28.0
+

+ 20367 - 0
patches/5.9/0006-surface-sam.patch

@@ -0,0 +1,20367 @@
+From 6841d3c1ca0d911bf0e01d9d818b240b95f44176 Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 17 Aug 2020 01:23:20 +0200
+Subject: [PATCH] misc: surface_sam: Add file2alias support for Surface SAM
+ devices
+
+Implement file2alias support for Surface System Aggregator Module (SSAM)
+devices. This allows modules to be auto-loaded for specific devices via
+their respective module alias.
+
+Patchset: surface-sam
+---
+ include/linux/mod_devicetable.h   | 17 +++++++++++++++++
+ scripts/mod/devicetable-offsets.c |  7 +++++++
+ scripts/mod/file2alias.c          | 21 +++++++++++++++++++++
+ 3 files changed, 45 insertions(+)
+
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index 5b08a473cdba..ef64063fac30 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -838,4 +838,21 @@ struct mhi_device_id {
+ 	kernel_ulong_t driver_data;
+ };
+ 
++/* Surface System Aggregator Module */
++
++#define SSAM_MATCH_CHANNEL	0x1
++#define SSAM_MATCH_INSTANCE	0x2
++#define SSAM_MATCH_FUNCTION	0x4
++
++struct ssam_device_id {
++	__u8 match_flags;
++
++	__u8 category;
++	__u8 channel;
++	__u8 instance;
++	__u8 function;
++
++	kernel_ulong_t driver_data;
++};
++
+ #endif /* LINUX_MOD_DEVICETABLE_H */
+diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
+index 27007c18e754..bcff122d0dc8 100644
+--- a/scripts/mod/devicetable-offsets.c
++++ b/scripts/mod/devicetable-offsets.c
+@@ -243,5 +243,12 @@ int main(void)
+ 	DEVID(mhi_device_id);
+ 	DEVID_FIELD(mhi_device_id, chan);
+ 
++	DEVID(ssam_device_id);
++	DEVID_FIELD(ssam_device_id, match_flags);
++	DEVID_FIELD(ssam_device_id, category);
++	DEVID_FIELD(ssam_device_id, channel);
++	DEVID_FIELD(ssam_device_id, instance);
++	DEVID_FIELD(ssam_device_id, function);
++
+ 	return 0;
+ }
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 2417dd1dee33..a6c583362b92 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1368,6 +1368,26 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
+ 	return 1;
+ }
+ 
++/* Looks like: ssam:cNtNiNfN
++ *
++ * N is exactly 2 digits, where each is an upper-case hex digit.
++ */
++static int do_ssam_entry(const char *filename, void *symval, char *alias)
++{
++	DEF_FIELD(symval, ssam_device_id, match_flags);
++	DEF_FIELD(symval, ssam_device_id, category);
++	DEF_FIELD(symval, ssam_device_id, channel);
++	DEF_FIELD(symval, ssam_device_id, instance);
++	DEF_FIELD(symval, ssam_device_id, function);
++
++	sprintf(alias, "ssam:c%02X", category);
++	ADD(alias, "t", match_flags & SSAM_MATCH_CHANNEL, channel);
++	ADD(alias, "i", match_flags & SSAM_MATCH_INSTANCE, instance);
++	ADD(alias, "f", match_flags & SSAM_MATCH_FUNCTION, function);
++
++	return 1;
++}
++
+ /* Does namelen bytes of name exactly match the symbol? */
+ static bool sym_is(const char *name, unsigned namelen, const char *symbol)
+ {
+@@ -1442,6 +1462,7 @@ static const struct devtable devtable[] = {
+ 	{"tee", SIZE_tee_client_device_id, do_tee_entry},
+ 	{"wmi", SIZE_wmi_device_id, do_wmi_entry},
+ 	{"mhi", SIZE_mhi_device_id, do_mhi_entry},
++	{"ssam", SIZE_ssam_device_id, do_ssam_entry},
+ };
+ 
+ /* Create MODULE_ALIAS() statements.
+-- 
+2.28.0
+
+From ca97ca61e1b003b34e4f713397c7e8f3d878cf0b Mon Sep 17 00:00:00 2001
+From: Maximilian Luz <luzmaximilian@gmail.com>
+Date: Mon, 17 Aug 2020 01:44:30 +0200
+Subject: [PATCH] misc: Add support for Surface System Aggregator Module
+
+Add support for the Surface System Aggregator Module (SSAM), an embedded
+controller that can be found on 5th and later generation Microsoft
+Surface devices. The responsibilities of this EC vary from device to
+device. It provides battery information on all 5th and later generation
+devices, temperature sensor and cooling capability access, functionality
+for clipboard detaching on the Surface Books (2 and 3), as well as
+HID-over-SSAM input devices, including keyboard on the Surface Laptop 1
+and 2, and keyboard as well as touchpad input on the Surface Laptop 3
+and Surface Book 3.
+
+Patchset: surface-sam
+---
+ Documentation/driver-api/index.rst            |    1 +
+ .../surface_aggregator/client-api.rst         |   38 +
+ .../driver-api/surface_aggregator/client.rst  |  394 +++
+ .../surface_aggregator/clients/cdev.rst       |   85 +
+ .../surface_aggregator/clients/dtx.rst        |  712 +++++
+ .../surface_aggregator/clients/index.rst      |   22 +
+ .../surface_aggregator/clients/san.rst        |   44 +
+ .../driver-api/surface_aggregator/index.rst   |   21 +
+ .../surface_aggregator/internal-api.rst       |   67 +
+ .../surface_aggregator/internal.rst           |   50 +
+ .../surface_aggregator/overview.rst           |   76 +
+ .../driver-api/surface_aggregator/ssh.rst     |  343 +++
+ drivers/misc/Kconfig                          |    1 +
+ drivers/misc/Makefile                         |    1 +
+ drivers/misc/surface_aggregator/Kconfig       |   67 +
+ drivers/misc/surface_aggregator/Makefile      |   18 +
+ drivers/misc/surface_aggregator/bus.c         |  424 +++
+ drivers/misc/surface_aggregator/bus.h         |   27 +
+ .../misc/surface_aggregator/clients/Kconfig   |  151 +
+ .../misc/surface_aggregator/clients/Makefile  |   11 +
+ .../clients/surface_acpi_notify.c             |  884 ++++++
+ .../clients/surface_aggregator_cdev.c         |  299 ++
+ .../clients/surface_aggregator_registry.c     |  652 +++++
+ .../clients/surface_battery.c                 | 1196 ++++++++
+ .../surface_aggregator/clients/surface_dtx.c  | 1270 ++++++++
+ .../surface_aggregator/clients/surface_hid.c  |  925 ++++++
+ .../clients/surface_hotplug.c                 | 1285 +++++++++
+ .../clients/surface_perfmode.c                |  122 +
+ drivers/misc/surface_aggregator/controller.c  | 2555 +++++++++++++++++
+ drivers/misc/surface_aggregator/controller.h  |  288 ++
+ drivers/misc/surface_aggregator/core.c        |  831 ++++++
+ drivers/misc/surface_aggregator/ssh_msgb.h    |  201 ++
+ .../surface_aggregator/ssh_packet_layer.c     | 2009 +++++++++++++
+ .../surface_aggregator/ssh_packet_layer.h     |  175 ++
+ drivers/misc/surface_aggregator/ssh_parser.c  |  229 ++
+ drivers/misc/surface_aggregator/ssh_parser.h  |  157 +
+ .../surface_aggregator/ssh_request_layer.c    | 1254 ++++++++
+ .../surface_aggregator/ssh_request_layer.h    |  142 +
+ drivers/misc/surface_aggregator/trace.h       |  625 ++++
+ include/linux/mod_devicetable.h               |    5 +-
+ include/linux/surface_acpi_notify.h           |   39 +
+ include/linux/surface_aggregator/controller.h |  815 ++++++
+ include/linux/surface_aggregator/device.h     |  430 +++
+ include/linux/surface_aggregator/serial_hub.h |  659 +++++
+ include/uapi/linux/surface_aggregator/cdev.h  |   58 +
+ include/uapi/linux/surface_aggregator/dtx.h   |  150 +
+ scripts/mod/devicetable-offsets.c             |    3 +-
+ scripts/mod/file2alias.c                      |   10 +-
+ 48 files changed, 19814 insertions(+), 7 deletions(-)
+ create mode 100644 Documentation/driver-api/surface_aggregator/client-api.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/client.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/clients/cdev.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/clients/dtx.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/clients/index.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/clients/san.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/index.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/internal-api.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/internal.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/overview.rst
+ create mode 100644 Documentation/driver-api/surface_aggregator/ssh.rst
+ create mode 100644 drivers/misc/surface_aggregator/Kconfig
+ create mode 100644 drivers/misc/surface_aggregator/Makefile
+ create mode 100644 drivers/misc/surface_aggregator/bus.c
+ create mode 100644 drivers/misc/surface_aggregator/bus.h
+ create mode 100644 drivers/misc/surface_aggregator/clients/Kconfig
+ create mode 100644 drivers/misc/surface_aggregator/clients/Makefile
+ create mode 100644 drivers/misc/surface_aggregator/clients/surface_acpi_notify.c
+ create mode 100644 drivers/misc/surface_aggregator/clients/surface_aggregator_cdev.c
+ create mode 100644 drivers/misc/surface_aggregator/clients/surface_aggregator_registry.c
+ create mode 100644 drivers/misc/surface_aggregator/clients/surface_battery.c
+ create mode 100644 drivers/misc/surface_aggregator/clients/surface_dtx.c
+ create mode 100644 drivers/misc/surface_aggregator/clients/surface_hid.c
+ create mode 100644 drivers/misc/surface_aggregator/clients/surface_hotplug.c
+ create mode 100644 drivers/misc/surface_aggregator/clients/surface_perfmode.c
+ create mode 100644 drivers/misc/surface_aggregator/controller.c
+ create mode 100644 drivers/misc/surface_aggregator/controller.h
+ create mode 100644 drivers/misc/surface_aggregator/core.c
+ create mode 100644 drivers/misc/surface_aggregator/ssh_msgb.h
+ create mode 100644 drivers/misc/surface_aggregator/ssh_packet_layer.c
+ create mode 100644 drivers/misc/surface_aggregator/ssh_packet_layer.h
+ create mode 100644 drivers/misc/surface_aggregator/ssh_parser.c
+ create mode 100644 drivers/misc/surface_aggregator/ssh_parser.h
+ create mode 100644 drivers/misc/surface_aggregator/ssh_request_layer.c
+ create mode 100644 drivers/misc/surface_aggregator/ssh_request_layer.h
+ create mode 100644 drivers/misc/surface_aggregator/trace.h
+ create mode 100644 include/linux/surface_acpi_notify.h
+ create mode 100644 include/linux/surface_aggregator/controller.h
+ create mode 100644 include/linux/surface_aggregator/device.h
+ create mode 100644 include/linux/surface_aggregator/serial_hub.h
+ create mode 100644 include/uapi/linux/surface_aggregator/cdev.h
+ create mode 100644 include/uapi/linux/surface_aggregator/dtx.h
+
+diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
+index 5ef2cfe3a16b..dbb5f7353022 100644
+--- a/Documentation/driver-api/index.rst
++++ b/Documentation/driver-api/index.rst
+@@ -100,6 +100,7 @@ available subsections can be seen below.
+    rfkill
+    serial/index
+    sm501
++   surface_aggregator/index
+    switchtec
+    sync_file
+    vfio-mediated-device
+diff --git a/Documentation/driver-api/surface_aggregator/client-api.rst b/Documentation/driver-api/surface_aggregator/client-api.rst
+new file mode 100644
+index 000000000000..a1117d57036a
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/client-api.rst
+@@ -0,0 +1,38 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++===============================
++Client Driver API Documentation
++===============================
++
++.. contents::
++    :depth: 2
++
++
++Serial Hub Communication
++========================
++
++.. kernel-doc:: include/linux/surface_aggregator/serial_hub.h
++
++.. kernel-doc:: drivers/misc/surface_aggregator/ssh_packet_layer.c
++    :export:
++
++
++Controller and Core Interface
++=============================
++
++.. kernel-doc:: include/linux/surface_aggregator/controller.h
++
++.. kernel-doc:: drivers/misc/surface_aggregator/controller.c
++    :export:
++
++.. kernel-doc:: drivers/misc/surface_aggregator/core.c
++    :export:
++
++
++Client Bus and Client Device API
++================================
++
++.. kernel-doc:: include/linux/surface_aggregator/device.h
++
++.. kernel-doc:: drivers/misc/surface_aggregator/bus.c
++    :export:
+diff --git a/Documentation/driver-api/surface_aggregator/client.rst b/Documentation/driver-api/surface_aggregator/client.rst
+new file mode 100644
+index 000000000000..41c17bb63bef
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/client.rst
+@@ -0,0 +1,394 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |ssam_controller| replace:: :c:type:`struct ssam_controller <ssam_controller>`
++.. |ssam_device| replace:: :c:type:`struct ssam_device <ssam_device>`
++.. |ssam_device_driver| replace:: :c:type:`struct ssam_device_driver <ssam_device_driver>`
++.. |ssam_client_bind| replace:: :c:func:`ssam_client_bind`
++.. |ssam_client_link| replace:: :c:func:`ssam_client_link`
++.. |ssam_get_controller| replace:: :c:func:`ssam_get_controller`
++.. |ssam_controller_get| replace:: :c:func:`ssam_controller_get`
++.. |ssam_controller_put| replace:: :c:func:`ssam_controller_put`
++.. |ssam_device_alloc| replace:: :c:func:`ssam_device_alloc`
++.. |ssam_device_add| replace:: :c:func:`ssam_device_add`
++.. |ssam_device_remove| replace:: :c:func:`ssam_device_remove`
++.. |ssam_device_driver_register| replace:: :c:func:`ssam_device_driver_register`
++.. |ssam_device_driver_unregister| replace:: :c:func:`ssam_device_driver_unregister`
++.. |module_ssam_device_driver| replace:: :c:func:`module_ssam_device_driver`
++.. |SSAM_DEVICE| replace:: :c:func:`SSAM_DEVICE`
++.. |ssam_notifier_register| replace:: :c:func:`ssam_notifier_register`
++.. |ssam_notifier_unregister| replace:: :c:func:`ssam_notifier_unregister`
++.. |ssam_request_sync| replace:: :c:func:`ssam_request_sync`
++.. |ssam_event_mask| replace:: :c:type:`enum ssam_event_mask <ssam_event_mask>`
++
++
++======================
++Writing Client Drivers
++======================
++
++For the API documentation, refer to:
++
++.. toctree::
++   :maxdepth: 2
++
++   client-api
++
++
++Overview
++========
++
++Client drivers can be set up in two main ways, depending on how the
++corresponding device is made available to the system. We specifically
++differentiate between devices that are presented to the system via one of
++the conventional ways, e.g. as platform devices via ACPI, and devices that
++are non-discoverable and instead need to be explicitly provided by some
++other mechanism, as discussed further below.
++
++
++Non-SSAM Client Drivers
++=======================
++
++All communication with the SAM EC is handled via the |ssam_controller|
++representing that EC to the kernel. Drivers targeting a non-SSAM device
++(and thus not being a |ssam_device_driver|) need to explicitly establish a
++connection/relation to that controller. This can be done via the
++|ssam_client_bind| function. Said function returns a reference to the SSAM
++controller, but, more importantly, also establishes a device link between
++client device and controller (this can also be done separately via
++|ssam_client_link|). It is important to do this, as it, first, guarantees
++that the returned controller is valid for use in the client driver for as
++long as this driver is bound to its device, i.e. that the driver gets
++un-bound before the controller ever becomes invalid, and, second, as it
++ensures correct suspend/resume ordering. This setup should be done in the
++driver's probe function, and may be used to defer probing in case the SSAM
++subsystem is not ready yet, for example:
++
++.. code-block:: c
++
++   static int client_driver_probe(struct platform_device *pdev)
++   {
++           struct ssam_controller *ctrl;
++           int status;
++
++           status = ssam_client_bind(&pdev->dev, &ctrl);
++           if (status)
++                   return status == -ENXIO ? -EPROBE_DEFER : status;
++
++           // ...
++
++           return 0;
++   }
++
++The controller may be separately obtained via |ssam_get_controller| and its
++lifetime be guaranteed via |ssam_controller_get| and |ssam_controller_put|.
++Note that none of these functions, however, guarantee that the controller
++will not be shut down or suspended. These functions essentially only operate
++on the reference, i.e. only guarantee a bare minimum of accessibility
++without any guarantees at all on practical operability.
++
++
++Adding SSAM Devices
++===================
++
++If a device does not already exist/is not already provided via conventional
++means, it should be provided as |ssam_device| via the SSAM client device
++hub. New devices can be added to this hub by entering their UID into the
++corresponding registry. SSAM devices can also be manually allocated via
++|ssam_device_alloc|, subsequently to which they have to be added via
++|ssam_device_add| and eventually removed via |ssam_device_remove|. By
++default, the parent of the device is set to the controller device provided
++for allocation, however this may be changed before the device is added. Note
++that, when changing the parent device, care must be taken to ensure that the
++controller lifetime and suspend/resume ordering guarantees, in the default
++setup provided through the parent-child relation, are preserved. If
++necessary, by use of |ssam_client_link| as is done for non-SSAM client
++drivers and described in more detail above.
++
++A client device must always be removed by the party which added the
++respective device before the controller shuts down. Such removal can be
++guaranteed by linking the driver providing the SSAM device to the controller
++via |ssam_client_link|, causing it to unbind before the controller driver
++unbinds. Client devices registered with the controller as parent are
++automatically removed when the controller shuts down, but this should not be
++relied upon, especially as this does not extend to client devices with a
++different parent.
++
++
++SSAM Client Drivers
++===================
++
++SSAM client device drivers are, in essence, no different than other device
++driver types. They are represented via |ssam_device_driver| and bind to a
++|ssam_device| via its UID (:c:type:`struct ssam_device.uid <ssam_device>`)
++member and the match table
++(:c:type:`struct ssam_device_driver.match_table <ssam_device_driver>`),
++which should be set when declaring the driver struct instance. Refer to the
++|SSAM_DEVICE| macro documentation for more details on how to define members
++of the driver's match table.
++
++The UID for SSAM client devices consists of a ``domain``, a ``category``,
++a ``target``, an ``instance``, and a ``function``. The ``domain`` is used
++to differentiate between physical SAM devices
++(:c:type:`SSAM_DOMAIN_SERIALHUB <ssam_device_domain>`), i.e. devices that can
++be accessed via the Surface Serial Hub, and virtual ones
++(:c:type:`SSAM_DOMAIN_VIRTUAL <ssam_device_domain>`), such as client-device
++hubs, that have no real representation on the SAM EC and are solely used on
++the kernel/driver-side. For physical devices, ``category`` represents the
++target category, ``target`` the target ID, and ``instance`` the instance ID
++used to access the physical SAM device. In addition, ``function`` references
++a specific device functionality, but has no meaning to the SAM EC. The
++(default) name of a client device is generated based on its UID.
++
++A driver instance can be registered via |ssam_device_driver_register| and
++unregistered via |ssam_device_driver_unregister|. For convenience, the
++|module_ssam_device_driver| macro may be used to define module init- and
++exit-functions registering the driver.
++
++The controller associated with a SSAM client device can be found in its
++:c:type:`struct ssam_device.ctrl <ssam_device>` member. This reference is
++guaranteed to be valid for at least as long as the client driver is bound,
++but should also be valid for as long as the client device exists. Note,
++however, that access outside of the bound client driver must ensure that the
++controller device is not suspended while making any requests or
++(un)registering event notifiers (and thus should generally be avoided). This
++is guaranteed when the controller is accessed from inside the bound client
++driver.
++
++
++Making Synchronous Requests
++===========================
++
++Synchronous requests are (currently) the main form of host-initiated
++communication with the EC. There are a couple of ways to define and execute
++such requests, however, most of them boil down to something similar as shown
++in the example below. This example defines a write-read request, meaning
++that the caller provides an argument to the SAM EC and receives a response.
++The caller needs to know the (maximum) length of the response payload and
++provide a buffer for it.
++
++Care must be taken to ensure that any command payload data passed to the SAM
++EC is provided in little-endian format and, similarly, any response payload
++data received from it is converted from little-endian to host endianness.
++
++.. code-block:: c
++
++   int perform_request(struct ssam_controller *ctrl, u32 arg, u32 *ret)
++   {
++           struct ssam_request rqst;
++           struct ssam_response resp;
++           int status;
++
++           /* Convert request argument to little-endian. */
++           __le32 arg_le = cpu_to_le32(arg);
++           __le32 ret_le = cpu_to_le32(0);
++
++           /*
++            * Initialize request specification. Replace this with your values.
++            * The rqst.payload field may be NULL if rqst.length is zero,
++            * indicating that the request does not have any argument.
++            *
++            * Note: The request parameters used here are not valid, i.e.
++            *       they do not correspond to an actual SAM/EC request.
++            */
++           rqst.target_category = SSAM_SSH_TC_SAM;
++           rqst.target_id = 0x01;
++           rqst.command_id = 0x02;
++           rqst.instance_id = 0x03;
++           rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++           rqst.length = sizeof(arg_le);
++           rqst.payload = (u8 *)&arg_le;
++
++           /* Initialize request response. */
++           resp.capacity = sizeof(ret_le);
++           resp.length = 0;
++           resp.pointer = (u8 *)&ret_le;
++
++           /*
++            * Perform actual request. The response pointer may be null in case
++            * the request does not have any response. This must be consistent
++            * with the SSAM_REQUEST_HAS_RESPONSE flag set in the specification
++            * above.
++            */
++           status = ssam_request_sync(ctrl, &rqst, &resp);
++           if (status)
++               return status;
++
++           /*
++            * Alternatively use
++            *
++            *   ssam_request_sync_onstack(ctrl, &rqst, &resp, sizeof(arg_le));
++            *
++            * to perform the request, allocating the message buffer directly
++            * on the stack as opposed to via kzalloc().
++            */
++
++           /*
++            * Convert request response back to native format. Note that in the
++            * error case, this value is not touched.
++            */
++           *ret = le32_to_cpu(ret_le);
++
++           return status;
++   }
++
++Note that |ssam_request_sync| in its essence is a wrapper over lower-level
++request primitives, which may also be used to perform requests. Refer to its
++implementation and documentation for more details.
++
++An arguably more user-friendly way of defining such functions is by using
++one of the generator macros, for example via:
++
++.. code-block:: c
++
++   SSAM_DEFINE_SYNC_REQUEST_W(__ssam_tmp_perf_mode_set, __le32, {
++           .target_category = SSAM_SSH_TC_TMP,
++           .target_id       = 0x01,
++           .command_id      = 0x03,
++           .instance_id     = 0x00,
++   });
++
++This example defines a function
++
++.. code-block:: c
++
++   int __ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, const __le32 *arg);
++
++executing the specified request, with the controller passed in when calling
++said function. In this example, the argument is provided via the ``arg``
++pointer. Note that the generated function allocates the message buffer on
++the stack. Thus, if the argument provided via the request is large, these
++kinds of macros should be avoided. Also note that, in contrast to the
++previous non-macro example, this function does not do any endianness
++conversion, which has to be handled by the caller. Apart from those
++differences the function generated by the macro is similar to the one
++provided in the non-macro example above.
++
++The full list of such function-generating macros is
++
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_N` for requests without return value and
++  without argument.
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_R` for requests with return value but no
++  argument.
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_W` for requests without return value but
++  with argument.
++
++Refer to their respective documentation for more details. For each one of
++these macros, a special variant is provided, which targets request types
++applicable to multiple instances of the same device type:
++
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_N`
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_R`
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_W`
++
++The difference of those macros to the previously mentioned versions is, that
++the device target and instance IDs are not fixed for the generated function,
++but instead have to be provided by the caller of said function.
++
++Additionally, variants for direct use with client devices, i.e.
++|ssam_device|, are also provided. These can, for example, be used as
++follows:
++
++.. code-block:: c
++
++   SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
++           .target_category = SSAM_SSH_TC_BAT,
++           .command_id      = 0x01,
++   });
++
++This invocation of the macro defines a function
++
++.. code-block:: c
++
++   int ssam_bat_get_sta(struct ssam_device *sdev, __le32 *ret);
++
++executing the specified request, using the device IDs and controller given
++in the client device. The full list of such macros for client devices is:
++
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_N`
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_R`
++- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_W`
++
++
++Handling Events
++===============
++
++To receive events from the SAM EC, an event notifier must be registered for
++the desired event via |ssam_notifier_register|. The notifier must be
++unregistered via |ssam_notifier_unregister| once it is not required any
++more.
++
++Event notifiers are registered by providing (at minimum) a callback to call
++in case an event has been received, the registry specifying how the event
++should be enabled, an event ID specifying for which target category and,
++optionally and depending on the registry used, for which instance ID events
++should be enabled, and finally, flags describing how the EC will send these
++events. Additionally, a priority for the respective notifier may be
++specified, which determines its order in relation to any other notifier
++registered for the same target category.
++
++By default, event notifiers will receive all events for the specific target
++category, regardless of the instance ID specified when registering the
++notifier. The core may be instructed to only call a notifier if the target
++ID or instance ID (or both) of the event match the ones implied by the
++notifier IDs (in case of target ID, the target ID of the registry), by
++providing an event mask (see |ssam_event_mask|).
++
++In general, the target ID of the registry is also the target ID of the
++enabled event (with the notable exception being keyboard input events on the
++Surface Laptop 1 and 2, which are enabled via a registry with target ID 1,
++but provide events with target ID 2).
++
++A full example for registering an event notifier and handling received
++events is provided below:
++
++.. code-block:: c
++
++   u32 notifier_callback(struct ssam_event_notifier *nf,
++                         const struct ssam_event *event)
++   {
++           int status = ...
++
++           /* Handle the event here ... */
++
++           /* Convert return value and indicate that we handled the event. */
++           return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++   }
++
++   int setup_notifier(struct ssam_device *sdev,
++                      struct ssam_event_notifier *nf)
++   {
++           /* Set priority wrt. other handlers of same target category. */
++           nf->base.priority = 1;
++
++           /* Set event/notifier callback. */
++           nf->base.fn = notifier_callback;
++
++           /* Specify event registry, i.e. how events get enabled/disabled. */
++           nf->event.reg = SSAM_EVENT_REGISTRY_KIP;
++
++           /* Specify which event to enable/disable */
++           nf->event.id.target_category = sdev->uid.category;
++           nf->event.id.instance = sdev->uid.instance;
++
++           /*
++            * Specify for which events the notifier callback gets executed.
++            * This essentially tells the core if it can skip notifiers that
++            * don't have target or instance IDs matching those of the event.
++            */
++           nf->event.mask = SSAM_EVENT_MASK_STRICT;
++
++           /* Specify event flags. */
++           nf->event.flags = SSAM_EVENT_SEQUENCED;
++
++           return ssam_notifier_register(sdev->ctrl, nf);
++   }
++
++Multiple event notifiers can be registered for the same event. The event
++handler core takes care of enabling and disabling events when notifiers are
++registered and unregistered, by keeping track of how many notifiers for a
++specific event (combination of registry, event target category, and event
++instance ID) are currently registered. This means that a specific event will
++be enabled when the first notifier for it is being registered and disabled
++when the last notifier for it is being unregistered. Note that the event
++flags are therefore only used on the first registered notifier, however, one
++should take care that notifiers for a specific event are always registered
++with the same flag and it is considered a bug to do otherwise.
+diff --git a/Documentation/driver-api/surface_aggregator/clients/cdev.rst b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
+new file mode 100644
+index 000000000000..63b5afcb89b5
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
+@@ -0,0 +1,85 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |u8| replace:: :c:type:`u8 <u8>`
++.. |u16| replace:: :c:type:`u16 <u16>`
++.. |ssam_cdev_request| replace:: :c:type:`struct ssam_cdev_request <ssam_cdev_request>`
++.. |ssam_request_flags| replace:: :c:type:`enum ssam_request_flags <ssam_request_flags>`
++
++==============================
++User-Space EC Interface (cdev)
++==============================
++
++The ``surface_aggregator_cdev`` module provides a misc-device for the SSAM
++controller to allow for a (more or less) direct connection from userspace to
++the SAM EC. It is intended to be used for development and debugging, and
++therefore should not be used or relied upon in any other way. Note that this
++module is not loaded automatically, but instead must be loaded manually.
++
++The provided interface is accessible through the ``/dev/surface/aggregator``
++device-file. All functionality of this interface is provided via IOCTLs.
++These IOCTLs and their respective input/output parameter structs are defined in
++``include/uapi/linux/surface_aggregator/cdev.h``.
++
++
++Controller IOCTLs
++=================
++
++The following IOCTLs are provided:
++
++.. flat-table:: Controller IOCTLs
++   :widths: 1 1 1 1 4
++   :header-rows: 1
++
++   * - Type
++     - Number
++     - Direction
++     - Name
++     - Description
++
++   * - ``0xA5``
++     - ``1``
++     - ``WR``
++     - ``REQUEST``
++     - Perform synchronous SAM request.
++
++
++``REQUEST``
++-----------
++
++Defined as ``_IOWR(0xA5, 1, struct ssam_cdev_request)``.
++
++Executes a synchronous SAM request. The request specification is passed in
++as argument of type |ssam_cdev_request|, which is then written to/modified
++by the IOCTL to return status and result of the request.
++
++Request payload data must be allocated separately and is passed in via the
++``payload.data`` and ``payload.length`` members. If a response is required,
++the response buffer must be allocated by the caller and passed in via the
++``response.data`` member. The ``response.length`` member must be set to the
++capacity of this buffer, or if no response is required, zero. Upon
++completion of the request, the call will write the response to the response
++buffer (if its capacity allows it) and overwrite the length field with the
++actual size of the response, in bytes.
++
++Additionally, if the request has a response, this should be indicated via
++the request flags, as is done with in-kernel requests. Request flags can be
++set via the ``flags`` member and the values correspond to the values found
++in |ssam_request_flags|.
++
++Finally, the status of the request itself is returned in the ``status``
++member (a negative value indicating failure). Note that failure indication
++of the IOCTL is separated from failure indication of the request: The IOCTL
++returns a negative status code if anything failed during setup of the
++request (``-EFAULT``) or if the provided argument or any of its fields are
++invalid (``-EINVAL``). In this case, the status value of the request
++argument may be set, providing more detail on what went wrong (e.g.
++``-ENOMEM`` for out-of-memory), but this value may also be zero. The IOCTL
++will return with a zero status code in case the request has been set up,
++submitted, and completed (i.e. handed back to user-space) successfully from
++inside the IOCTL, but the request ``status`` member may still be negative in
++case the actual execution of the request failed after it has been submitted.
++
++A full definition of the argument struct is provided below:
++
++.. kernel-doc:: include/uapi/linux/surface_aggregator/cdev.h
++   :functions: ssam_cdev_request
+diff --git a/Documentation/driver-api/surface_aggregator/clients/dtx.rst b/Documentation/driver-api/surface_aggregator/clients/dtx.rst
+new file mode 100644
+index 000000000000..e974c2b04e9f
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/clients/dtx.rst
+@@ -0,0 +1,712 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |__u16| replace:: :c:type:`__u16 <__u16>`
++.. |sdtx_event| replace:: :c:type:`struct sdtx_event <sdtx_event>`
++.. |sdtx_event_code| replace:: :c:type:`enum sdtx_event_code <sdtx_event_code>`
++.. |sdtx_base_info| replace:: :c:type:`struct sdtx_base_info <sdtx_base_info>`
++.. |sdtx_device_mode| replace:: :c:type:`struct sdtx_device_mode <sdtx_device_mode>`
++
++======================================================
++User-Space DTX (Clipboard Detachment System) Interface
++======================================================
++
++The ``surface_dtx`` driver is responsible for proper clipboard detachment
++and re-attachment handling. To this end, it provides the ``/dev/surface/dtx``
++device file, through which it can interface with a user-space daemon. This
++daemon is then ultimately responsible for determining and taking necessary
++actions, such as unmounting devices attached to the base,
++unloading/reloading the graphics-driver, user-notifications, etc.
++
++There are two basic communication principles used in this driver: Commands
++(in other parts of the documentation also referred to as requests) and
++events. Commands are sent to the EC and may have a different implications in
++different contexts. Events are sent by the EC upon some internal state
++change. Commands are always driver-initiated, whereas events are always
++initiated by the EC.
++
++.. contents::
++
++Nomenclature
++============
++
++* **Clipboard:**
++  The detachable upper part of the Surface Book, housing the screen and CPU.
++
++* **Base:**
++  The lower part of the Surface Book from which the clipboard can be
++  detached, optionally (model dependent) housing the discrete GPU (dGPU).
++
++* **Latch:**
++  The mechanism keeping the clipboard attached to the base in normal
++  operation and allowing it to be detached when requested.
++
++* **Silently ignored commands:**
++  The command is accepted by the EC as a valid command and acknowledged
++  (following the standard communication protocol), but the EC does not act
++  upon it, i.e. ignores it.
++
++
++Detachment Process
++==================
++
++Warning: This part of the documentation is based on reverse engineering and
++testing and thus may contain errors or be incomplete.
++
++Latch States
++------------
++
++The latch mechanism has two major states: *open* and *closed*. In the
++*closed* state (default), the clipboard is secured to the base, whereas in
++the *open* state, the clipboard can be removed by a user.
++
++The latch can additionally be locked and, correspondingly, unlocked, which
++can influence the detachment procedure. Specifically, this locking mechanism
++is intended to prevent the dGPU, positioned in the base of the device,
++from being hot-unplugged while in use. More details can be found in the
++documentation for the detachment procedure below. By default, the latch is
++unlocked.
++
++Detachment Procedure
++--------------------
++
++Note that the detachment process is governed fully by the EC. The
++``surface_dtx`` driver only relays events from the EC to user-space and
++commands from user-space to the EC, i.e. it does not influence this process.
++
++The detachment process is started with the user pressing the *detach* button
++on the base of the device or executing the ``SDTX_IOCTL_LATCH_REQUEST`` IOCTL.
++Following that:
++
++1. The EC turns on the indicator led on the detach-button, sends a
++   *detach-request* event (``SDTX_EVENT_REQUEST``), and awaits further
++   instructions/commands. In case the latch is unlocked, the led will flash
++   green. If the latch has been locked, the led will be solid red.
++
++2. The event is, via the ``surface_dtx`` driver, relayed to user-space, where
++   an appropriate user-space daemon can handle it and send instructions back
++   to the EC via IOCTLs provided by this driver.
++
++3. The EC waits for instructions from user-space and acts according to them.
++   If the EC does not receive any instructions in a given period, it will
++   time out and continue as follows:
++
++   - If the latch is unlocked, the EC will open the latch and the clipboard
++     can be detached from the base. This is the exact behavior as without
++     this driver or any user-space daemon. See the ``SDTX_IOCTL_LATCH_CONFIRM``
++     description below for more details on the follow-up behavior of the EC.
++
++   - If the latch is locked, the EC will *not* open the latch, meaning the
++     clipboard cannot be detached from the base. Furthermore, the EC sends
++     a cancel event (``SDTX_EVENT_CANCEL``) detailing this with the cancel
++     reason ``SDTX_DETACH_TIMEDOUT`` (see :ref:`events` for details).
++
++Valid responses by a user-space daemon to a detachment request event are:
++
++- Execute ``SDTX_IOCTL_LATCH_REQUEST``. This will immediately abort the
++  detachment process. Furthermore, the EC will send a detach-request event,
++  similar to the user pressing the detach-button to cancel said process (see
++  below).
++
++- Execute ``SDTX_IOCTL_LATCH_CONFIRM``. This will cause the EC to open the
++  latch, after which the user can separate clipboard and base.
++
++  As this changes the latch state, a *latch-status* event
++  (``SDTX_EVENT_LATCH_STATUS``) will be sent once the latch has been opened
++  successfully. If the EC fails to open the latch, e.g. due to hardware
++  error or low battery, a latch-cancel event (``SDTX_EVENT_CANCEL``) will be
++  sent with the cancel reason indicating the specific failure.
++
++  If the latch is currently locked, the latch will automatically be
++  unlocked before it is opened.
++
++- Execute ``SDTX_IOCTL_LATCH_HEARTBEAT``. This will reset the internal timeout.
++  No other actions will be performed, i.e. the detachment process will neither
++  be completed nor canceled, and the EC will still be waiting for further
++  responses.
++
++- Execute ``SDTX_IOCTL_LATCH_CANCEL``. This will abort the detachment process,
++  similar to ``SDTX_IOCTL_LATCH_REQUEST``, described above, or the button
++  press, described below. A *generic request* event (``SDTX_EVENT_REQUEST``)
++  is sent in response to this. In contrast to those, however, this command
++  does not trigger a new detachment process if none is currently in
++  progress.
++
++- Do nothing. The detachment process eventually times out as described in
++  point 3.
++
++See :ref:`ioctls` for more details on these responses.
++
++It is important to note that, if the user presses the detach button at any
++point when a detachment operation is in progress (i.e. after the EC has
++sent the initial *detach-request* event (``SDTX_EVENT_REQUEST``) and before
++it received the corresponding response concluding the process), the
++detachment process is canceled on the EC-level and an identical event is
++being sent. Thus a *detach-request* event, by itself, does not signal the
++start of the detachment process.
++
++The detachment process may further be canceled by the EC due to hardware
++failures or a low clipboard battery. This is done via a cancel event
++(``SDTX_EVENT_CANCEL``) with the corresponding cancel reason.
++
++
++User-Space Interface Documentation
++==================================
++
++Error Codes and Status Values
++-----------------------------
++
++Error and status codes are divided into different categories, which can be
++used to determine if the status code is an error, and, if it is, the
++severity and type of that error. The current categories are:
++
++.. flat-table:: Overview of Status/Error Categories.
++   :widths: 2 1 3
++   :header-rows: 1
++
++   * - Name
++     - Value
++     - Short Description
++
++   * - ``STATUS``
++     - ``0x0000``
++     - Non-error status codes.
++
++   * - ``RUNTIME_ERROR``
++     - ``0x1000``
++     - Non-critical runtime errors.
++
++   * - ``HARDWARE_ERROR``
++     - ``0x2000``
++     - Critical hardware failures.
++
++   * - ``UNKNOWN``
++     - ``0xF000``
++     - Unknown error codes.
++
++Other categories are reserved for future use. The ``SDTX_CATEGORY()`` macro
++can be used to determine the category of any status value. The
++``SDTX_SUCCESS()`` macro can be used to check if the status value is a
++success value (``SDTX_CATEGORY_STATUS``) or if it indicates a failure.
++
++Unknown status or error codes sent by the EC are assigned to the ``UNKNOWN``
++category by the driver and may be implemented via their own code in the
++future.
++
++Currently used error codes are:
++
++.. flat-table:: Overview of Error Codes.
++   :widths: 2 1 1 3
++   :header-rows: 1
++
++   * - Name
++     - Category
++     - Value
++     - Short Description
++
++   * - ``SDTX_DETACH_NOT_FEASIBLE``
++     - ``RUNTIME``
++     - ``0x1001``
++     - Detachment not feasible due to low clipboard battery.
++
++   * - ``SDTX_DETACH_TIMEDOUT``
++     - ``RUNTIME``
++     - ``0x1002``
++     - Detachment process timed out while the latch was locked.
++
++   * - ``SDTX_ERR_FAILED_TO_OPEN``
++     - ``HARDWARE``
++     - ``0x2001``
++     - Failed to open latch.
++
++   * - ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``
++     - ``HARDWARE``
++     - ``0x2002``
++     - Failed to keep latch open.
++
++   * - ``SDTX_ERR_FAILED_TO_CLOSE``
++     - ``HARDWARE``
++     - ``0x2003``
++     - Failed to close latch.
++
++Other error codes are reserved for future use. Non-error status codes may
++overlap and are generally only unique within their use-case:
++
++.. flat-table:: Latch Status Codes.
++   :widths: 2 1 1 3
++   :header-rows: 1
++
++   * - Name
++     - Category
++     - Value
++     - Short Description
++
++   * - ``SDTX_LATCH_CLOSED``
++     - ``STATUS``
++     - ``0x0000``
++     - Latch is closed/has been closed.
++
++   * - ``SDTX_LATCH_OPENED``
++     - ``STATUS``
++     - ``0x0001``
++     - Latch is open/has been opened.
++
++.. flat-table:: Base State Codes.
++   :widths: 2 1 1 3
++   :header-rows: 1
++
++   * - Name
++     - Category
++     - Value
++     - Short Description
++
++   * - ``SDTX_BASE_DETACHED``
++     - ``STATUS``
++     - ``0x0000``
++     - Base has been detached/is not present.
++
++   * - ``SDTX_BASE_ATTACHED``
++     - ``STATUS``
++     - ``0x0001``
++     - Base has been attached/is present.
++
++Again, other codes are reserved for future use.
++
++.. _events:
++
++Events
++------
++
++Events can be received by reading from the device file. They are disabled by
++default and have to be enabled by executing ``SDTX_IOCTL_EVENTS_ENABLE``
++first. All events follow the layout prescribed by |sdtx_event|. Specific
++event types can be identified by their event code, described in
++|sdtx_event_code|. Note that other event codes are reserved for future use,
++thus an event parser must be able to handle any unknown/unsupported event
++types gracefully, by relying on the payload length given in the event header.
++
++Currently provided event types are:
++
++.. flat-table:: Overview of DTX events.
++   :widths: 2 1 1 3
++   :header-rows: 1
++
++   * - Name
++     - Code
++     - Payload
++     - Short Description
++
++   * - ``SDTX_EVENT_REQUEST``
++     - ``1``
++     - ``0`` bytes
++     - Detachment process initiated/aborted.
++
++   * - ``SDTX_EVENT_CANCEL``
++     - ``2``
++     - ``2`` bytes
++     - EC canceled detachment process.
++
++   * - ``SDTX_EVENT_BASE_CONNECTION``
++     - ``3``
++     - ``4`` bytes
++     - Base connection state changed.
++
++   * - ``SDTX_EVENT_LATCH_STATUS``
++     - ``4``
++     - ``2`` bytes
++     - Latch status changed.
++
++   * - ``SDTX_EVENT_DEVICE_MODE``
++     - ``5``
++     - ``2`` bytes
++     - Device mode changed.
++
++Individual events in more detail:
++
++``SDTX_EVENT_REQUEST``
++^^^^^^^^^^^^^^^^^^^^^^
++
++Sent when a detachment process is started or, if in progress, aborted by the
++user, either via a detach button press or a detach request
++(``SDTX_IOCTL_LATCH_REQUEST``) being sent from user-space.
++
++Does not have any payload.
++
++``SDTX_EVENT_CANCEL``
++^^^^^^^^^^^^^^^^^^^^^
++
++Sent when a detachment process is canceled by the EC due to unfulfilled
++preconditions (e.g. clipboard battery too low to detach) or hardware
++failure. The reason for cancellation is given in the event payload detailed
++below and can be one of
++
++* ``SDTX_DETACH_TIMEDOUT``: Detachment timed out while the latch was locked.
++  The latch has neither been opened nor unlocked.
++
++* ``SDTX_DETACH_NOT_FEASIBLE``: Detachment not feasible due to low clipboard
++  battery.
++
++* ``SDTX_ERR_FAILED_TO_OPEN``: Could not open the latch (hardware failure).
++
++* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``: Could not keep the latch open (hardware
++  failure).
++
++* ``SDTX_ERR_FAILED_TO_CLOSE``: Could not close the latch (hardware failure).
++
++Other error codes in this context are reserved for future use.
++
++These codes can be classified via the ``SDTX_CATEGORY()`` macro to discern
++between critical hardware errors (``SDTX_CATEGORY_HARDWARE_ERROR``) or
++runtime errors (``SDTX_CATEGORY_RUNTIME_ERROR``), the latter of which may
++happen during normal operation if certain preconditions for detachment are
++not given.
++
++.. flat-table:: Detachment Cancel Event Payload
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - ``reason``
++     - |__u16|
++     - Reason for cancellation.
++
++``SDTX_EVENT_BASE_CONNECTION``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Sent when the base connection state has changed, i.e. when the base has been
++attached, detached, or detachment has become infeasible due to low clipboard
++battery. The new state and, if a base is connected, ID of the base is
++provided as payload of type |sdtx_base_info| with its layout presented
++below:
++
++.. flat-table:: Base-Connection-Change Event Payload
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - ``state``
++     - |__u16|
++     - Base connection state.
++
++   * - ``base_id``
++     - |__u16|
++     - Type of base connected (zero if none).
++
++Possible values for ``state`` are:
++
++* ``SDTX_BASE_DETACHED``,
++* ``SDTX_BASE_ATTACHED``, and
++* ``SDTX_DETACH_NOT_FEASIBLE``.
++
++Other values are reserved for future use.
++
++``SDTX_EVENT_LATCH_STATUS``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Sent when the latch status has changed, i.e. when the latch has been opened,
++closed, or an error occurred. The current status is provided as payload:
++
++.. flat-table:: Latch-Status-Change Event Payload
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - ``status``
++     - |__u16|
++     - Latch status.
++
++Possible values for ``status`` are:
++
++* ``SDTX_LATCH_CLOSED``,
++* ``SDTX_LATCH_OPENED``,
++* ``SDTX_ERR_FAILED_TO_OPEN``,
++* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``, and
++* ``SDTX_ERR_FAILED_TO_CLOSE``.
++
++Other values are reserved for future use.
++
++``SDTX_EVENT_DEVICE_MODE``
++^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Sent when the device mode has changed. The new device mode is provided as
++payload:
++
++.. flat-table:: Device-Mode-Change Event Payload
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - ``mode``
++     - |__u16|
++     - Device operation mode.
++
++Possible values for ``mode`` are:
++
++* ``SDTX_DEVICE_MODE_TABLET``,
++* ``SDTX_DEVICE_MODE_LAPTOP``, and
++* ``SDTX_DEVICE_MODE_STUDIO``.
++
++Other values are reserved for future use.
++
++.. _ioctls:
++
++IOCTLs
++------
++
++The following IOCTLs are provided:
++
++.. flat-table:: Overview of DTX IOCTLs
++   :widths: 1 1 1 1 4
++   :header-rows: 1
++
++   * - Type
++     - Number
++     - Direction
++     - Name
++     - Description
++
++   * - ``0xA5``
++     - ``0x21``
++     - ``-``
++     - ``EVENTS_ENABLE``
++     - Enable events for the current file descriptor.
++
++   * - ``0xA5``
++     - ``0x22``
++     - ``-``
++     - ``EVENTS_DISABLE``
++     - Disable events for the current file descriptor.
++
++   * - ``0xA5``
++     - ``0x23``
++     - ``-``
++     - ``LATCH_LOCK``
++     - Lock the latch.
++
++   * - ``0xA5``
++     - ``0x24``
++     - ``-``
++     - ``LATCH_UNLOCK``
++     - Unlock the latch.
++
++   * - ``0xA5``
++     - ``0x25``
++     - ``-``
++     - ``LATCH_REQUEST``
++     - Request clipboard detachment.
++
++   * - ``0xA5``
++     - ``0x26``
++     - ``-``
++     - ``LATCH_CONFIRM``
++     - Confirm clipboard detachment request.
++
++   * - ``0xA5``
++     - ``0x27``
++     - ``-``
++     - ``LATCH_HEARTBEAT``
++     - Send heartbeat signal to EC.
++
++   * - ``0xA5``
++     - ``0x28``
++     - ``-``
++     - ``LATCH_CANCEL``
++     - Cancel detachment process.
++
++   * - ``0xA5``
++     - ``0x29``
++     - ``R``
++     - ``GET_BASE_INFO``
++     - Get current base/connection information.
++
++   * - ``0xA5``
++     - ``0x2A``
++     - ``R``
++     - ``GET_DEVICE_MODE``
++     - Get current device operation mode.
++
++   * - ``0xA5``
++     - ``0x2B``
++     - ``R``
++     - ``GET_LATCH_STATUS``
++     - Get current device latch status.
++
++``SDTX_IOCTL_EVENTS_ENABLE``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x21)``.
++
++Enable events for the current file descriptor. Events can be obtained by
++reading from the device, if enabled. Events are disabled by default.
++
++``SDTX_IOCTL_EVENTS_DISABLE``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x22)``.
++
++Disable events for the current file descriptor. Events can be obtained by
++reading from the device, if enabled. Events are disabled by default.
++
++``SDTX_IOCTL_LATCH_LOCK``
++^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x23)``.
++
++Locks the latch, causing the detachment procedure to abort without opening
++the latch on timeout. The latch is unlocked by default. This command will be
++silently ignored if the latch is already locked.
++
++``SDTX_IOCTL_LATCH_UNLOCK``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x24)``.
++
++Unlocks the latch, causing the detachment procedure to open the latch on
++timeout. The latch is unlocked by default. This command will not open the
++latch when sent during an ongoing detachment process. It will be silently
++ignored if the latch is already unlocked.
++
++``SDTX_IOCTL_LATCH_REQUEST``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x25)``.
++
++Generic latch request. Behavior depends on the context: If no
++detachment-process is active, detachment is requested. Otherwise the
++currently active detachment-process will be aborted.
++
++If a detachment process is canceled by this operation, a generic detachment
++request event (``SDTX_EVENT_REQUEST``) will be sent.
++
++This essentially behaves the same as a detachment button press.
++
++``SDTX_IOCTL_LATCH_CONFIRM``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x26)``.
++
++Acknowledges and confirms a latch request. If sent during an ongoing
++detachment process, this command causes the latch to be opened immediately.
++The latch will also be opened if it has been locked. In this case, the latch
++lock is reset to the unlocked state.
++
++This command will be silently ignored if there is currently no detachment
++procedure in progress.
++
++``SDTX_IOCTL_LATCH_HEARTBEAT``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x27)``.
++
++Sends a heartbeat, essentially resetting the detachment timeout. This
++command can be used to keep the detachment process alive while work required
++for the detachment to succeed is still in progress.
++
++This command will be silently ignored if there is currently no detachment
++procedure in progress.
++
++``SDTX_IOCTL_LATCH_CANCEL``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IO(0xA5, 0x28)``.
++
++Cancels detachment in progress (if any). If a detachment process is canceled
++by this operation, a generic detachment request event
++(``SDTX_EVENT_REQUEST``) will be sent.
++
++This command will be silently ignored if there is currently no detachment
++procedure in progress.
++
++``SDTX_IOCTL_GET_BASE_INFO``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IOR(0xA5, 0x29, struct sdtx_base_info)``.
++
++Get the current base connection state (i.e. attached/detached) and the type
++of the base connected to the clipboard. This command essentially provides
++a way to query the information provided by the base connection change event
++(``SDTX_EVENT_BASE_CONNECTION``).
++
++Possible values for ``struct sdtx_base_info.state`` are:
++
++* ``SDTX_BASE_DETACHED``,
++* ``SDTX_BASE_ATTACHED``, and
++* ``SDTX_DETACH_NOT_FEASIBLE``.
++
++Other values are reserved for future use.
++
++``SDTX_IOCTL_GET_DEVICE_MODE``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IOR(0xA5, 0x2A, __u16)``.
++
++Returns the device operation mode, indicating if and how the base is
++attached to the clipboard. This command essentially provides a way to
++query the information provided by the device mode change event
++(``SDTX_EVENT_DEVICE_MODE``).
++
++Returned values are:
++
++* ``SDTX_DEVICE_MODE_LAPTOP``
++* ``SDTX_DEVICE_MODE_TABLET``
++* ``SDTX_DEVICE_MODE_STUDIO``
++
++See |sdtx_device_mode| for details. Other values are reserved for future
++use.
++
++
++``SDTX_IOCTL_GET_LATCH_STATUS``
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Defined as ``_IOR(0xA5, 0x2B, __u16)``.
++
++Get the current latch status or (presumably) the last error encountered when
++trying to open/close the latch. This command essentially provides a way
++to query the information provided by the latch status change event
++(``SDTX_EVENT_LATCH_STATUS``).
++
++Returned values are:
++
++* ``SDTX_LATCH_CLOSED``,
++* ``SDTX_LATCH_OPENED``,
++* ``SDTX_ERR_FAILED_TO_OPEN``,
++* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``, and
++* ``SDTX_ERR_FAILED_TO_CLOSE``.
++
++Other values are reserved for future use.
++
++A Note on Base IDs
++------------------
++
++Base types/IDs provided via ``SDTX_EVENT_BASE_CONNECTION`` or
++``SDTX_IOCTL_GET_BASE_INFO`` are directly forwarded from the EC in the
++lower byte of the combined |__u16| value, with the driver storing the EC
++type from which this ID comes in the high byte (without this, base IDs over
++different types of ECs may be overlapping).
++
++The ``SDTX_DEVICE_TYPE()`` macro can be used to determine the EC device
++type. This can be one of
++
++* ``SDTX_DEVICE_TYPE_HID``, for Surface Aggregator Module over HID, and
++
++* ``SDTX_DEVICE_TYPE_SSH``, for Surface Aggregator Module over Surface Serial
++  Hub.
++
++Note that currently only the ``SSH`` type EC is supported, however ``HID``
++type is reserved for future use.
++
++Structures and Enums
++--------------------
++
++.. kernel-doc:: include/uapi/linux/surface_aggregator/dtx.h
+diff --git a/Documentation/driver-api/surface_aggregator/clients/index.rst b/Documentation/driver-api/surface_aggregator/clients/index.rst
+new file mode 100644
+index 000000000000..98ea9946b8a2
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/clients/index.rst
+@@ -0,0 +1,22 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++===========================
++Client Driver Documentation
++===========================
++
++This is the documentation for client drivers themselves. Refer to
++:doc:`../client` for documentation on how to write client drivers.
++
++.. toctree::
++   :maxdepth: 1
++
++   cdev
++   dtx
++   san
++
++.. only::  subproject and html
++
++   Indices
++   =======
++
++   * :ref:`genindex`
+diff --git a/Documentation/driver-api/surface_aggregator/clients/san.rst b/Documentation/driver-api/surface_aggregator/clients/san.rst
+new file mode 100644
+index 000000000000..1bf830ad367d
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/clients/san.rst
+@@ -0,0 +1,44 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |san_client_link| replace:: :c:func:`san_client_link`
++.. |san_dgpu_notifier_register| replace:: :c:func:`san_dgpu_notifier_register`
++.. |san_dgpu_notifier_unregister| replace:: :c:func:`san_dgpu_notifier_unregister`
++
++===================
++Surface ACPI Notify
++===================
++
++The Surface ACPI Notify (SAN) device provides the bridge between ACPI and
++SAM controller. Specifically, ACPI code can execute requests and handle
++battery and thermal events via this interface. In addition to this, events
++relating to the discrete GPU (dGPU) of the Surface Book 2 can be sent from
++ACPI code (note: the Surface Book 3 uses a different method for this). The
++only currently known event sent via this interface is a dGPU power-on
++notification. While this driver handles the former part internally, it only
++relays the dGPU events to any other driver interested via its public API and
++does not handle them.
++
++The public interface of this driver is split into two parts: Client
++registration and notifier-block registration.
++
++A client to the SAN interface can be linked as consumer to the SAN device
++via |san_client_link|. This can be used to ensure that a client
++receiving dGPU events does not miss any events due to the SAN interface not
++being set up as this forces the client driver to unbind once the SAN driver
++is unbound.
++
++Notifier-blocks can be registered by any device for as long as the module is
++loaded, regardless of being linked as client or not. Registration is done
++with |san_dgpu_notifier_register|. If the notifier is not needed any more, it
++should be unregistered via |san_dgpu_notifier_unregister|.
++
++Consult the API documentation below for more details.
++
++
++API Documentation
++=================
++
++.. kernel-doc:: include/linux/surface_acpi_notify.h
++
++.. kernel-doc:: drivers/misc/surface_aggregator/clients/surface_acpi_notify.c
++    :export:
+diff --git a/Documentation/driver-api/surface_aggregator/index.rst b/Documentation/driver-api/surface_aggregator/index.rst
+new file mode 100644
+index 000000000000..9fa70eedca59
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/index.rst
+@@ -0,0 +1,21 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++=======================================
++Surface System Aggregator Module (SSAM)
++=======================================
++
++.. toctree::
++   :maxdepth: 2
++
++   overview
++   ssh
++   client
++   internal
++   clients/index
++
++.. only::  subproject and html
++
++   Indices
++   =======
++
++   * :ref:`genindex`
+diff --git a/Documentation/driver-api/surface_aggregator/internal-api.rst b/Documentation/driver-api/surface_aggregator/internal-api.rst
+new file mode 100644
+index 000000000000..db6a70119f49
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/internal-api.rst
+@@ -0,0 +1,67 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++==========================
++Internal API Documentation
++==========================
++
++.. contents::
++    :depth: 2
++
++
++Packet Transport Layer
++======================
++
++.. kernel-doc:: drivers/misc/surface_aggregator/ssh_parser.h
++    :internal:
++
++.. kernel-doc:: drivers/misc/surface_aggregator/ssh_parser.c
++    :internal:
++
++.. kernel-doc:: drivers/misc/surface_aggregator/ssh_msgb.h
++    :internal:
++
++.. kernel-doc:: drivers/misc/surface_aggregator/ssh_packet_layer.h
++    :internal:
++
++.. kernel-doc:: drivers/misc/surface_aggregator/ssh_packet_layer.c
++    :internal:
++
++
++Request Transport Layer
++=======================
++
++.. kernel-doc:: drivers/misc/surface_aggregator/ssh_request_layer.h
++    :internal:
++
++.. kernel-doc:: drivers/misc/surface_aggregator/ssh_request_layer.c
++    :internal:
++
++
++Controller
++==========
++
++.. kernel-doc:: drivers/misc/surface_aggregator/controller.h
++    :internal:
++
++.. kernel-doc:: drivers/misc/surface_aggregator/controller.c
++    :internal:
++
++
++Client Device Bus
++=================
++
++.. kernel-doc:: drivers/misc/surface_aggregator/bus.c
++    :internal:
++
++
++Core
++====
++
++.. kernel-doc:: drivers/misc/surface_aggregator/core.c
++    :internal:
++
++
++Trace Helpers
++=============
++
++.. kernel-doc:: drivers/misc/surface_aggregator/trace.h
+diff --git a/Documentation/driver-api/surface_aggregator/internal.rst b/Documentation/driver-api/surface_aggregator/internal.rst
+new file mode 100644
+index 000000000000..6c020b87ad62
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/internal.rst
+@@ -0,0 +1,50 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++=====================
++Core Driver Internals
++=====================
++
++For the API documentation, refer to:
++
++.. toctree::
++   :maxdepth: 2
++
++   internal-api
++
++
++Overview
++========
++
++The SSAM core implementation is structured in layers, somewhat following the
++SSH protocol structure:
++
++Lower-level packet transport is implemented in the *packet transport layer
++(PTL)*, directly building on top of the serial device (serdev)
++infrastructure of the kernel. As the name indicates, this layer deals with
++the packet transport logic and handles things like packet validation, packet
++acknowledgement (ACKing), packet (retransmission) timeouts, and relaying
++packet payloads to higher-level layers.
++
++Above this sits the *request transport layer (RTL)*. This layer is centered
++around command-type packet payloads, i.e. requests (sent from host to EC),
++responses of the EC to those requests, and events (sent from EC to host).
++It, specifically, distinguishes events from request responses, matches
++responses to their corresponding requests, and implements request timeouts.
++
++The *controller* layer is building on top of this and essentially decides
++how request responses and, especially, events are dealt with. It provides an
++event notifier system, handles event activation/deactivation, provides a
++workqueue for event and asynchronous request completion, and also manages
++the message counters required for building command messages (``SEQ``,
++``RQID``). This layer basically provides a fundamental interface to the SAM
++EC for use in other kernel drivers.
++
++While the controller layer already provides an interface for other kernel
++drivers, the client *bus* extends this interface to provide support for
++native SSAM devices, i.e. devices that are not defined in ACPI and not
++implemented as platform devices, via :c:type:`struct ssam_device <ssam_device>`
++and :c:type:`struct ssam_device_driver <ssam_device_driver>`. This aims to
++simplify management of client devices and client drivers.
++
++Refer to :doc:`client` for documentation regarding the client device/driver
++API and interface options for other kernel drivers.
+diff --git a/Documentation/driver-api/surface_aggregator/overview.rst b/Documentation/driver-api/surface_aggregator/overview.rst
+new file mode 100644
+index 000000000000..06d49ce001e7
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/overview.rst
+@@ -0,0 +1,76 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++========
++Overview
++========
++
++The Surface/System Aggregator Module (SAM, SSAM) is an (arguably *the*)
++embedded controller (EC) on Microsoft Surface devices. It has been originally
++introduced on 4th generation devices (Surface Pro 4, Surface Book 1), but
++its responsibilities and feature-set have since been expanded significantly
++with the following generations.
++
++
++Features and Integration
++========================
++
++Not much is currently known about SAM on 4th generation devices (Surface Pro
++4, Surface Book 1), due to the use of a different communication interface
++between host and EC (as detailed below). On 5th (Surface Pro 2017, Surface
++Book 2, Surface Laptop 1) and later generation devices, SAM is responsible
++for providing battery information (both current status and static values,
++such as maximum capacity etc.), as well as an assortment of temperature
++sensors (e.g. skin temperature) and cooling/performance-mode setting to the
++host. On the Surface Book 2, specifically, it additionally provides an
++interface for properly handling clipboard detachment (i.e. separating the
++display part from the keyboard part of the device), on the Surface Laptop 1
++and 2 it is required for keyboard HID input. This HID subsystem has been
++restructured for 7th generation devices and on those, specifically Surface
++Laptop 3 and Surface Book 3, is responsible for all major HID input (i.e.
++keyboard and touchpad).
++
++While the features have not changed much on a coarse level since the 5th
++generation, internal interfaces have undergone some rather large changes. On
++5th and 6th generation devices, both battery and temperature information is
++exposed to ACPI via a shim driver (referred to as Surface ACPI Notify, or
++SAN), translating ACPI generic serial bus write-/read-accesses to SAM
++requests. On 7th generation devices, this additional layer is gone and these
++devices require a driver hooking directly into the SAM interface. Equally,
++on newer generations, fewer devices are declared in ACPI, making them a bit
++harder to discover and requiring us to hard-code a sort of device registry.
++Due to this, a SSAM bus and subsystem with client devices
++(:c:type:`struct ssam_device <ssam_device>`) has been implemented.
++
++
++Communication
++=============
++
++The type of communication interface between host and EC depends on the
++generation of the Surface device. On 4th generation devices, host and EC
++communicate via HID, specifically using a HID-over-I2C device, whereas on
++5th and later generations, communication takes place via a USART serial
++device. In accordance to the drivers found on other operating systems, we
++refer to the serial device and its driver as Surface Serial Hub (SSH) and
++when needed to differentiate between both types of SAM as SAM-over-SSH, in
++contrast to SAM-over-HID for the former variant.
++
++Currently, this subsystem only supports SAM-over-SSH. The SSH communication
++interface is described in more detail below. The HID interface has not been
++reverse engineered yet and it is, at the moment, unclear how many (and
++which) concepts of the SSH interface detailed below can be transferred to
++it.
++
++Surface Serial Hub
++------------------
++
++As already elaborated above, the Surface Serial Hub (SSH) is the
++communication interface for SAM on 5th- and all later-generation Surface
++devices. On the highest level, communication can be separated into two main
++types: Requests, messages sent from host to EC that may trigger a direct
++response from the EC (explicitly associated with the request), and events
++(sometimes also referred to as notifications), sent from EC to host without
++being a direct response to a previous request. We may also refer to requests
++without response as commands. In general, events need to be enabled via one
++of multiple dedicated commands before they are sent by the EC.
++
++See :doc:`ssh` for a more technical protocol documentation.
+diff --git a/Documentation/driver-api/surface_aggregator/ssh.rst b/Documentation/driver-api/surface_aggregator/ssh.rst
+new file mode 100644
+index 000000000000..0b68228010e9
+--- /dev/null
++++ b/Documentation/driver-api/surface_aggregator/ssh.rst
+@@ -0,0 +1,343 @@
++.. SPDX-License-Identifier: GPL-2.0+
++
++.. |u8| replace:: :c:type:`u8 <u8>`
++.. |u16| replace:: :c:type:`u16 <u16>`
++.. |TYPE| replace:: ``TYPE``
++.. |LEN| replace:: ``LEN``
++.. |SEQ| replace:: ``SEQ``
++.. |SYN| replace:: ``SYN``
++.. |NAK| replace:: ``NAK``
++.. |ACK| replace:: ``ACK``
++.. |DATA| replace:: ``DATA``
++.. |DATA_SEQ| replace:: ``DATA_SEQ``
++.. |DATA_NSQ| replace:: ``DATA_NSQ``
++.. |TC| replace:: ``TC``
++.. |TID| replace:: ``TID``
++.. |IID| replace:: ``IID``
++.. |RQID| replace:: ``RQID``
++.. |CID| replace:: ``CID``
++
++===========================
++Surface Serial Hub Protocol
++===========================
++
++The Surface Serial Hub (SSH) is the central communication interface for the
++embedded Surface Aggregator Module controller (SAM or EC) on newer Surface
++generations. We will refer to this protocol and interface as SAM-over-SSH,
++as opposed to SAM-over-HID for the older generations.
++
++On Surface devices with SAM-over-SSH, SAM is connected to the host via UART
++and defined in ACPI as device with ID ``MSHW0084``. On these devices,
++significant functionality is provided via SAM, including access to battery
++and power information and events, thermal read-outs and events, and many
++more. For Surface Laptops, keyboard input is handled via HID directed
++through SAM, on the Surface Laptop 3 and Surface Book 3 this also includes
++touchpad input.
++
++Note that the standard disclaimer for this subsystem also applies to this
++document: All of this has been reverse-engineered and may thus be erroneous
++and/or incomplete.
++
++All CRCs used in the following are two-byte ``crc_ccitt_false(0xffff, ...)``.
++All multi-byte values are little-endian, there is no implicit padding between
++values.
++
++
++SSH Packet Protocol: Definitions
++================================
++
++The fundamental communication unit of the SSH protocol is a frame
++(:c:type:`struct ssh_frame <ssh_frame>`). A frame consists of the following
++fields, packed together and in order:
++
++.. flat-table:: SSH Frame
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - |TYPE|
++     - |u8|
++     - Type identifier of the frame.
++
++   * - |LEN|
++     - |u16|
++     - Length of the payload associated with the frame.
++
++   * - |SEQ|
++     - |u8|
++     - Sequence ID (see explanation below).
++
++Each frame structure is followed by a CRC over this structure. The CRC over
++the frame structure (|TYPE|, |LEN|, and |SEQ| fields) is placed directly
++after the frame structure and before the payload. The payload is followed by
++its own CRC (over all payload bytes). If the payload is not present (i.e.
++the frame has ``LEN=0``), the CRC of the payload is still present and will
++evaluate to ``0xffff``. The |LEN| field does not include any of the CRCs, it
++equals the number of bytes between the CRC of the frame and the CRC of the
++payload.
++
++Additionally, the following fixed two-byte sequences are used:
++
++.. flat-table:: SSH Byte Sequences
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Name
++     - Value
++     - Description
++
++   * - |SYN|
++     - ``[0xAA, 0x55]``
++     - Synchronization bytes.
++
++A message consists of |SYN|, followed by the frame (|TYPE|, |LEN|, |SEQ| and
++CRC) and, if specified in the frame (i.e. ``LEN > 0``), payload bytes,
++followed finally, regardless if the payload is present, the payload CRC. The
++messages corresponding to an exchange are, in part, identified by having the
++same sequence ID (|SEQ|), stored inside the frame (more on this in the next
++section). The sequence ID is a wrapping counter.
++
++A frame can have the following types
++(:c:type:`enum ssh_frame_type <ssh_frame_type>`):
++
++.. flat-table:: SSH Frame Types
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Name
++     - Value
++     - Short Description
++
++   * - |NAK|
++     - ``0x04``
++     - Sent on error in previously received message.
++
++   * - |ACK|
++     - ``0x40``
++     - Sent to acknowledge receival of |DATA| frame.
++
++   * - |DATA_SEQ|
++     - ``0x80``
++     - Sent to transfer data. Sequenced.
++
++   * - |DATA_NSQ|
++     - ``0x00``
++     - Same as |DATA_SEQ|, but does not need to be ACKed.
++
++Both |NAK|- and |ACK|-type frames are used to control flow of messages and
++thus do not carry a payload. |DATA_SEQ|- and |DATA_NSQ|-type frames on the
++other hand must carry a payload. The flow sequence and interaction of
++different frame types will be described in more depth in the next section.
++
++
++SSH Packet Protocol: Flow Sequence
++==================================
++
++Each exchange begins with |SYN|, followed by a |DATA_SEQ|- or
++|DATA_NSQ|-type frame, followed by its CRC, payload, and payload CRC. In
++case of a |DATA_NSQ|-type frame, the exchange is then finished. In case of a
++|DATA_SEQ|-type frame, the receiving party has to acknowledge receival of
++the frame by responding with a message containing an |ACK|-type frame with
++the same sequence ID of the |DATA| frame. In other words, the sequence ID of
++the |ACK| frame specifies the |DATA| frame to be acknowledged. In case of an
++error, e.g. an invalid CRC, the receiving party responds with a message
++containing an |NAK|-type frame. As the sequence ID of the previous data
++frame, for which an error is indicated via the |NAK| frame, cannot be relied
++upon, the sequence ID of the |NAK| frame should not be used and is set to
++zero. After receival of an |NAK| frame, the sending party should re-send all
++outstanding (non-ACKed) messages.
++
++Sequence IDs are not synchronized between the two parties, meaning that they
++are managed independently for each party. Identifying the messages
++corresponding to a single exchange thus relies on the sequence ID as well as
++the type of the message, and the context. Specifically, the sequence ID is
++used to associate an ``ACK`` with its ``DATA_SEQ``-type frame, but not
++``DATA_SEQ``- or ``DATA_NSQ``-type frames with other ``DATA``- type frames.
++
++An example exchange might look like this:
++
++::
++
++    tx: -- SYN FRAME(D) CRC(F) PAYLOAD CRC(P) -----------------------------
++    rx: ------------------------------------- SYN FRAME(A) CRC(F) CRC(P) --
++
++where both frames have the same sequence ID (``SEQ``). Here, ``FRAME(D)``
++indicates a |DATA_SEQ|-type frame, ``FRAME(A)`` an ``ACK``-type frame,
++``CRC(F)`` the CRC over the previous frame, ``CRC(P)`` the CRC over the
++previous payload. In case of an error, the exchange would look like this:
++
++::
++
++    tx: -- SYN FRAME(D) CRC(F) PAYLOAD CRC(P) -----------------------------
++    rx: ------------------------------------- SYN FRAME(N) CRC(F) CRC(P) --
++
++upon which the sender should re-send the message. ``FRAME(N)`` indicates an
++|NAK|-type frame. Note that the sequence ID of the |NAK|-type frame is fixed
++to zero. For |DATA_NSQ|-type frames, both exchanges are the same:
++
++::
++
++    tx: -- SYN FRAME(DATA_NSQ) CRC(F) PAYLOAD CRC(P) ----------------------
++    rx: -------------------------------------------------------------------
++
++Here, an error can be detected, but not corrected or indicated to the
++sending party. These exchanges are symmetric, i.e. switching rx and tx
++results again in a valid exchange. Currently, no longer exchanges are known.
++
++
++Commands: Requests, Responses, and Events
++=========================================
++
++Commands are sent as payload inside a data frame. Currently, this is the
++only known payload type of |DATA| frames, with a payload-type value of
++``0x80`` (:c:type:`SSH_PLD_TYPE_CMD <ssh_payload_type>`).
++
++The command-type payload (:c:type:`struct ssh_command <ssh_command>`)
++consists of an eight-byte command structure, followed by optional and
++variable length command data. The length of this optional data is derived
++from the frame payload length given in the corresponding frame, i.e. it is
++``frame.len - sizeof(struct ssh_command)``. The command struct contains the
++following fields, packed together and in order:
++
++.. flat-table:: SSH Command
++   :widths: 1 1 4
++   :header-rows: 1
++
++   * - Field
++     - Type
++     - Description
++
++   * - |TYPE|
++     - |u8|
++     - Type of the payload. For commands always ``0x80``.
++
++   * - |TC|
++     - |u8|
++     - Target category.
++
++   * - |TID| (out)
++     - |u8|
++     - Target ID for outgoing (host to EC) commands.
++
++   * - |TID| (in)
++     - |u8|
++     - Target ID for incoming (EC to host) commands.
++
++   * - |IID|
++     - |u8|
++     - Instance ID.
++
++   * - |RQID|
++     - |u16|
++     - Request ID.
++
++   * - |CID|
++     - |u8|
++     - Command ID.
++
++The command struct and data, in general, does not contain any failure
++detection mechanism (e.g. CRCs), this is solely done on the frame level.
++
++Command-type payloads are used by the host to send commands and requests to
++the EC as well as by the EC to send responses and events back to the host.
++We differentiate between requests (sent by the host), responses (sent by the
++EC in response to a request), and events (sent by the EC without a
++preceeding request).
++
++Commands and events are uniquely identified by their target category
++(``TC``) and command ID (``CID``). The target category specifies a general
++category for the command (e.g. system in general, vs. battery and ac, vs.
++temperature, and so on), while the command ID specifies the command inside
++that category. Only the combination of |TC| + |CID| is unique. Additionally,
++commands have an instance ID (``IID``), which is used to differentiate
++between different sub-devices. For example ``TC=3`` ``CID=1`` is a
++request to get the temperature on a thermal sensor, where |IID| specifies
++the respective sensor. If the instance ID is not used, it should be set to
++zero. If instance IDs are used, they, in general, start with a value of one,
++whereas zero may be used for instance independent queries, if applicable. A
++response to a request should have the same target category, command ID, and
++instance ID as the corresponding request.
++
++Responses are matched to their corresponding request via the request ID
++(``RQID``) field. This is a 16 bit wrapping counter similar to the sequence
++ID on the frames. Note that the sequence ID of the frames for a
++request-response pair does not match. Only the request ID has to match.
++Frame-protocol wise these are two separate exchanges, and may even be
++separated, e.g. by an event being sent after the request but before the
++response. Not all commands produce a response, and this is not detectable by
++|TC| + |CID|. It is the responsibility of the issuing party to wait for a
++response (or signal this to the communication framework, as is done in
++SAN/ACPI via the ``SNC`` flag).
++
++Events are identified by unique and reserved request IDs. These IDs should
++not be used by the host when sending a new request. They are used on the
++host to, first, detect events and, second, match them with a registered
++event handler. Request IDs for events are chosen by the host and directed to
++the EC when setting up and enabling an event source (via the
++enable-event-source request). The EC then uses the specified request ID for
++events sent from the respective source. Note that an event should still be
++identified by its target category, command ID, and, if applicable, instance
++ID, as a single event source can send multiple different event types. In
++general, however, a single target category should map to a single reserved
++event request ID.
++
++Furthermore, requests, responses, and events have an associated target ID
++(``TID``). This target ID is split into output (host to EC) and input (EC to
++host) fields, with the respecting other field (e.g. output field on incoming
++messages) set to zero. Two ``TID`` values are known: Primary (``0x01``) and
++secondary (``0x02``). In general, the response to a request should have the
++same ``TID`` value, however, the field (output vs. input) should be used in
++accordance to the direction in which the response is sent (i.e. on the input
++field, as responses are generally sent from the EC to the host).
++
++Note that, even though requests and events should be uniquely identifiable
++by target category and command ID alone, the EC may require specific
++priority and instance ID values to accept a command. A command that is
++accepted for ``TID=1``, for example, may not be accepted for ``TID=2``
++and vice versa.
++
++
++Limitations and Observations
++============================
++
++The protocol can, in theory, handle up to ``U8_MAX`` frames in parallel,
++with up to ``U16_MAX`` pending requests (neglecting request IDs reserved for
++events). In practice, however, this is more limited. From our testing
++(altough via a python and thus a user-space program), it seems that the EC
++can handle up to four requests (mostly) reliably in parallel at a certain
++time. With five or more requests in parallel, consistent discarding of
++commands (ACKed frame but no command response) has been observed. For five
++simultaneous commands, this reproducibly resulted in one command being
++dropped and four commands being handled.
++
++However, it has also been noted that, even with three requests in parallel,
++occasional frame drops happen. Apart from this, with a limit of three
++pending requests, no dropped commands (i.e. command being dropped but frame
++carrying command being ACKed) have been observed. In any case, frames (and
++possibly also commands) should be re-sent by the host if a certain timeout
++is exceeded. This is done by the EC for frames with a timeout of one second,
++up to two re-tries (i.e. three transmissions in total). The limit of
++re-tries also applies to received NAKs, and, in a worst case scenario, can
++lead to entire messages being dropped.
++
++While this also seems to work fine for pending data frames as long as no
++transmission failures occur, implementation and handling of these seems to
++depend on the assumption that there is only one non-acknowledged data frame.
++In particular, the detection of repeated frames relies on the last sequence
++number. This means that, if a frame that has been successfully received by
++the EC is sent again, e.g. due to the host not receiving an |ACK|, the EC
++will only detect this if it has the sequence ID of the last frame received
++by the EC. As an example: Sending two frames with ``SEQ=0`` and ``SEQ=1``
++followed by a repetition of ``SEQ=0`` will not detect the second ``SEQ=0``
++frame as such, and thus execute the command in this frame each time it has
++been received, i.e. twice in this example. Sending ``SEQ=0``, ``SEQ=1`` and
++then repeating ``SEQ=1`` will detect the second ``SEQ=1`` as repetition of
++the first one and ignore it, thus executing the contained command only once.
++
++In conclusion, this suggests a limit of at most one pending un-ACKed frame
++(per party, effectively leading to synchronous communication regarding
++frames) and at most three pending commands. The limit to synchronous frame
++transfers seems to be consistent with behavior observed on Windows.
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 102969c546d7..1c9d36272ea6 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -472,5 +472,6 @@ source "drivers/misc/ocxl/Kconfig"
+ source "drivers/misc/cardreader/Kconfig"
+ source "drivers/misc/habanalabs/Kconfig"
+ source "drivers/misc/uacce/Kconfig"
++source "drivers/misc/surface_aggregator/Kconfig"
+ source "drivers/misc/ipts/Kconfig"
+ endmenu
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index f97938d777e1..b0caee1dd3b7 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -58,3 +58,4 @@ obj-$(CONFIG_HABANA_AI)		+= habanalabs/
+ obj-$(CONFIG_UACCE)		+= uacce/
+ obj-$(CONFIG_XILINX_SDFEC)	+= xilinx_sdfec.o
+ obj-$(CONFIG_MISC_IPTS)		+= ipts/
++obj-$(CONFIG_SURFACE_AGGREGATOR)	+= surface_aggregator/
+diff --git a/drivers/misc/surface_aggregator/Kconfig b/drivers/misc/surface_aggregator/Kconfig
+new file mode 100644
+index 000000000000..47dd8fdffac3
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/Kconfig
+@@ -0,0 +1,67 @@
++# SPDX-License-Identifier: GPL-2.0+
++# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++
++menuconfig SURFACE_AGGREGATOR
++	tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
++	depends on SERIAL_DEV_BUS
++	depends on ACPI
++	select CRC_CCITT
++	help
++	  The Surface System Aggregator Module (Surface SAM or SSAM) is an
++	  embedded controller (EC) found on 5th- and later-generation Microsoft
++	  Surface devices (i.e. Surface Pro 5, Surface Book 2, Surface Laptop,
++	  and newer, with exception of Surface Go series devices).
++
++	  Depending on the device in question, this EC provides varying
++	  functionality, including:
++	  - EC access from ACPI via Surface ACPI Notify (5th- and 6th-generation)
++	  - battery status information (all devices)
++	  - thermal sensor access (all devices)
++	  - performance mode / cooling mode control (all devices)
++	  - clipboard detachment system control (Surface Book 2 and 3)
++	  - HID / keyboard input (Surface Laptops, Surface Book 3)
++
++	  This option controls whether the Surface SAM subsystem core will be
++	  built. This includes a driver for the Surface Serial Hub (SSH), which
++	  is the device responsible for the communication with the EC, and a
++	  basic kernel interface exposing the EC functionality to other client
++	  drivers, i.e. allowing them to make requests to the EC and receive
++	  events from it. Selecting this option alone will not provide any
++	  client drivers and therefore no functionality beyond the in-kernel
++	  interface. Said functionality is the repsonsibility of the respective
++	  client drivers.
++
++	  Note: While 4th-generation Surface devices also make use of a SAM EC,
++	  due to a difference in the communication interface of the controller,
++	  only 5th and later generations are currently supported. Specifically,
++	  devices using SAM-over-SSH are supported, whereas devices using
++	  SAM-over-HID, which is used on the 4th generation, are currently not
++	  supported.
++
++config SURFACE_AGGREGATOR_BUS
++	bool "Surface System Aggregator Module Bus"
++	depends on SURFACE_AGGREGATOR
++	default y
++	help
++	  Expands the Surface System Aggregator Module (SSAM) core driver by
++	  providing a dedicated bus and client-device type.
++
++	  This bus and device type are intended to provide and simplify support
++	  for non-platform and non-ACPI SSAM devices, i.e. SSAM devices that are
++	  not auto-detectable via the conventional means (e.g. ACPI).
++
++config SURFACE_AGGREGATOR_ERROR_INJECTION
++	bool "Surface System Aggregator Module Error Injection Capabilities"
++	depends on SURFACE_AGGREGATOR
++	depends on FUNCTION_ERROR_INJECTION
++	help
++	  Provides error-injection capabilities for the Surface System
++	  Aggregator Module subsystem and Surface Serial Hub driver.
++
++	  Specifically, exports error injection hooks to be used with the
++	  kernel's function error injection capabilities to simulate underlying
++	  transport and communication problems, such as invalid data sent to or
++	  received from the EC, dropped data, and communication timeouts.
++	  Intended for development and debugging.
++
++source "drivers/misc/surface_aggregator/clients/Kconfig"
+diff --git a/drivers/misc/surface_aggregator/Makefile b/drivers/misc/surface_aggregator/Makefile
+new file mode 100644
+index 000000000000..b48ffc37ab52
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/Makefile
+@@ -0,0 +1,18 @@
++# SPDX-License-Identifier: GPL-2.0+
++# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++
++# For include/trace/define_trace.h to include trace.h
++CFLAGS_core.o = -I$(src)
++
++obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
++obj-$(CONFIG_SURFACE_AGGREGATOR) += clients/
++
++surface_aggregator-objs := core.o
++surface_aggregator-objs += ssh_parser.o
++surface_aggregator-objs += ssh_packet_layer.o
++surface_aggregator-objs += ssh_request_layer.o
++surface_aggregator-objs += controller.o
++
++ifeq ($(CONFIG_SURFACE_AGGREGATOR_BUS),y)
++surface_aggregator-objs += bus.o
++endif
+diff --git a/drivers/misc/surface_aggregator/bus.c b/drivers/misc/surface_aggregator/bus.c
+new file mode 100644
+index 000000000000..5e734bbd18cd
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/bus.c
+@@ -0,0 +1,424 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface System Aggregator Module bus and device integration.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/device.h>
++#include <linux/slab.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
++
++#include "bus.h"
++#include "controller.h"
++
++
++static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
++			     char *buf)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	return snprintf(buf, PAGE_SIZE - 1, "ssam:d%02Xc%02Xt%02Xi%02xf%02X\n",
++			sdev->uid.domain, sdev->uid.category, sdev->uid.target,
++			sdev->uid.instance, sdev->uid.function);
++}
++static DEVICE_ATTR_RO(modalias);
++
++static struct attribute *ssam_device_attrs[] = {
++	&dev_attr_modalias.attr,
++	NULL,
++};
++ATTRIBUTE_GROUPS(ssam_device);
++
++static int ssam_device_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02xf%02X",
++			      sdev->uid.domain, sdev->uid.category,
++			      sdev->uid.target, sdev->uid.instance,
++			      sdev->uid.function);
++}
++
++static void ssam_device_release(struct device *dev)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	ssam_controller_put(sdev->ctrl);
++	kfree(sdev);
++}
++
++const struct device_type ssam_device_type = {
++	.name    = "surface_aggregator_device",
++	.groups  = ssam_device_groups,
++	.uevent  = ssam_device_uevent,
++	.release = ssam_device_release,
++};
++EXPORT_SYMBOL_GPL(ssam_device_type);
++
++
++/**
++ * ssam_device_alloc() - Allocate and initialize a SSAM client device.
++ * @ctrl: The controller under which the device should be added.
++ * @uid:  The UID of the device to be added.
++ *
++ * Allocates and initializes a new client device. The parent of the device
++ * will be set to the controller device and the name will be set based on the
++ * UID. Note that the device still has to be added via ssam_device_add().
++ * Refer to that function for more details.
++ *
++ * Return: Returns the newly allocated and initialized SSAM client device, or
++ * %NULL if it could not be allocated.
++ */
++struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
++				      struct ssam_device_uid uid)
++{
++	struct ssam_device *sdev;
++
++	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
++	if (!sdev)
++		return NULL;
++
++	device_initialize(&sdev->dev);
++	sdev->dev.bus = &ssam_bus_type;
++	sdev->dev.type = &ssam_device_type;
++	sdev->dev.parent = ssam_controller_device(ctrl);
++	sdev->ctrl = ssam_controller_get(ctrl);
++	sdev->uid = uid;
++
++	dev_set_name(&sdev->dev, "%02x:%02x:%02x:%02x:%02x",
++		     sdev->uid.domain, sdev->uid.category, sdev->uid.target,
++		     sdev->uid.instance, sdev->uid.function);
++
++	return sdev;
++}
++EXPORT_SYMBOL_GPL(ssam_device_alloc);
++
++/**
++ * ssam_device_add() - Add a SSAM client device.
++ * @sdev: The SSAM client device to be added.
++ *
++ * Added client devices must be guaranteed to always have a valid and active
++ * controller. Thus, this function will fail with %-ENXIO if the controller of
++ * the device has not been initialized yet, has been suspended, or has been
++ * shut down.
++ *
++ * The caller of this function should ensure that the corresponding call to
++ * ssam_device_remove() is issued before the controller is shut down. If the
++ * added device is a direct child of the controller device (default), it will
++ * be automatically removed when the controller is shut down.
++ *
++ * By default, the controller device will become the parent of the newly
++ * created client device. The parent may be changed before ssam_device_add is
++ * called, but care must be taken that a) the correct suspend/resume ordering
++ * is guaranteed and b) the client device does not oultive the controller,
++ * i.e. that the device is removed before the controller is being shut down.
++ * In case these guarantees have to be manually enforced, please refer to the
++ * ssam_client_link() and ssam_client_bind() functions, which are intended to
++ * set up device-links for this purpose.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssam_device_add(struct ssam_device *sdev)
++{
++	int status;
++
++	/*
++	 * Ensure that we can only add new devices to a controller if it has
++	 * been started and is not going away soon. This works in combination
++	 * with ssam_controller_remove_clients to ensure driver presence for the
++	 * controller device, i.e. it ensures that the controller (sdev->ctrl)
++	 * is always valid and can be used for requests as long as the client
++	 * device we add here is registered as child under it. This essentially
++	 * guarantees that the client driver can always expect the preconditions
++	 * for functions like ssam_request_sync (controller has to be started
++	 * and is not suspended) to hold and thus does not have to check for
++	 * them.
++	 *
++	 * Note that for this to work, the controller has to be a parent device.
++	 * If it is not a direct parent, care has to be taken that the device is
++	 * removed via ssam_device_remove(), as device_unregister does not
++	 * remove child devices recursively.
++	 */
++	ssam_controller_statelock(sdev->ctrl);
++
++	if (sdev->ctrl->state != SSAM_CONTROLLER_STARTED) {
++		ssam_controller_stateunlock(sdev->ctrl);
++		return -ENXIO;
++	}
++
++	status = device_add(&sdev->dev);
++
++	ssam_controller_stateunlock(sdev->ctrl);
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_device_add);
++
++/**
++ * ssam_device_remove() - Remove a SSAM client device.
++ * @sdev: The device to remove.
++ *
++ * Removes and unregisters the provided SSAM client device.
++ */
++void ssam_device_remove(struct ssam_device *sdev)
++{
++	device_unregister(&sdev->dev);
++}
++EXPORT_SYMBOL_GPL(ssam_device_remove);
++
++
++/**
++ * ssam_device_id_compatible() - Check if a device ID matches a UID.
++ * @id:  The device ID as potential match.
++ * @uid: The device UID matching against.
++ *
++ * Check if the given ID is a match for the given UID, i.e. if a device with
++ * the provided UID is compatible to the given ID following the match rules
++ * described in its &ssam_device_id.match_flags member.
++ *
++ * Return: Returns %true iff the given UID is compatible to the match rule
++ * described by the given ID, %false otherwise.
++ */
++static bool ssam_device_id_compatible(const struct ssam_device_id *id,
++				      struct ssam_device_uid uid)
++{
++	if (id->domain != uid.domain || id->category != uid.category)
++		return false;
++
++	if ((id->match_flags & SSAM_MATCH_TARGET) && id->target != uid.target)
++		return false;
++
++	if ((id->match_flags & SSAM_MATCH_INSTANCE) && id->instance != uid.instance)
++		return false;
++
++	if ((id->match_flags & SSAM_MATCH_FUNCTION) && id->function != uid.function)
++		return false;
++
++	return true;
++}
++
++/**
++ * ssam_device_id_is_null() - Check if a device ID is null.
++ * @id: The device ID to check.
++ *
++ * Check if a given device ID is null, i.e. all zeros. Used to check for the
++ * end of ``MODULE_DEVICE_TABLE(ssam, ...)`` or similar lists.
++ *
++ * Return: Returns %true if the given ID represents a null ID, %false
++ * otherwise.
++ */
++static bool ssam_device_id_is_null(const struct ssam_device_id *id)
++{
++	return id->match_flags == 0
++		&& id->domain == 0
++		&& id->category == 0
++		&& id->target == 0
++		&& id->instance == 0
++		&& id->function == 0
++		&& id->driver_data == 0;
++}
++
++/**
++ * ssam_device_id_match() - Find the matching ID table entry for the given UID.
++ * @table: The table to search in.
++ * @uid:   The UID to matched against the individual table entries.
++ *
++ * Find the first match for the provided device UID in the provided ID table
++ * and return it. Returns %NULL if no match could be found.
++ */
++const struct ssam_device_id *ssam_device_id_match(
++		const struct ssam_device_id *table,
++		const struct ssam_device_uid uid)
++{
++	const struct ssam_device_id *id;
++
++	for (id = table; !ssam_device_id_is_null(id); ++id)
++		if (ssam_device_id_compatible(id, uid))
++			return id;
++
++	return NULL;
++}
++EXPORT_SYMBOL_GPL(ssam_device_id_match);
++
++/**
++ * ssam_device_get_match() - Find and return the ID matching the device in the
++ * ID table of the bound driver.
++ * @dev: The device for which to get the matching ID table entry.
++ *
++ * Find the fist match for the UID of the device in the ID table of the
++ * currently bound driver and return it. Returns %NULL if the device does not
++ * have a driver bound to it, the driver does not have match_table (i.e. it is
++ * %NULL), or there is no match in the driver's match_table.
++ *
++ * This function essentially calls ssam_device_id_match() with the ID table of
++ * the bound device driver and the UID of the device.
++ *
++ * Return: Returns the first match for the UID of the device in the device
++ * driver's match table, or %NULL if no such match could be found.
++ */
++const struct ssam_device_id *ssam_device_get_match(
++		const struct ssam_device *dev)
++{
++	const struct ssam_device_driver *sdrv;
++
++	sdrv = to_ssam_device_driver(dev->dev.driver);
++	if (!sdrv)
++		return NULL;
++
++	if (!sdrv->match_table)
++		return NULL;
++
++	return ssam_device_id_match(sdrv->match_table, dev->uid);
++}
++EXPORT_SYMBOL_GPL(ssam_device_get_match);
++
++/**
++ * ssam_device_get_match_data() - Find the ID matching the device in hte
++ * ID table of the bound driver and return its ``driver_data`` member.
++ * @dev: The device for which to get the match data.
++ *
++ * Find the fist match for the UID of the device in the ID table of the
++ * corresponding driver and return its driver_data. Returns %NULL if the
++ * device does not have a driver bound to it, the driver does not have
++ * match_table (i.e. it is %NULL), there is no match in the driver's
++ * match_table, or the match does not have any driver_data.
++ *
++ * This function essentially calls ssam_device_get_match() and, if any match
++ * could be found, returns its ``struct ssam_device_id.driver_data`` member.
++ *
++ * Return: Returns the driver data associated with the first match for the UID
++ * of the device in the device driver's match table, or %NULL if no such match
++ * could be found.
++ */
++const void *ssam_device_get_match_data(const struct ssam_device *dev)
++{
++	const struct ssam_device_id *id;
++
++	id = ssam_device_get_match(dev);
++	if (!id)
++		return NULL;
++
++	return (const void *)id->driver_data;
++}
++EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
++
++
++static int ssam_bus_match(struct device *dev, struct device_driver *drv)
++{
++	struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	if (!is_ssam_device(dev))
++		return 0;
++
++	return !!ssam_device_id_match(sdrv->match_table, sdev->uid);
++}
++
++static int ssam_bus_probe(struct device *dev)
++{
++	return to_ssam_device_driver(dev->driver)
++		->probe(to_ssam_device(dev));
++}
++
++static int ssam_bus_remove(struct device *dev)
++{
++	struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
++
++	if (sdrv->remove)
++		sdrv->remove(to_ssam_device(dev));
++
++	return 0;
++}
++
++struct bus_type ssam_bus_type = {
++	.name   = "surface_aggregator",
++	.match  = ssam_bus_match,
++	.probe  = ssam_bus_probe,
++	.remove = ssam_bus_remove,
++};
++EXPORT_SYMBOL_GPL(ssam_bus_type);
++
++
++/**
++ * __ssam_device_driver_register() - Register a SSAM client device driver.
++ * @sdrv:  The driver to register.
++ * @owner: The module owning the provided driver.
++ *
++ * Please refer to the ssam_device_driver_register() macro for the normal way
++ * to register a driver from inside its owning module.
++ */
++int __ssam_device_driver_register(struct ssam_device_driver *sdrv,
++				  struct module *owner)
++{
++	sdrv->driver.owner = owner;
++	sdrv->driver.bus = &ssam_bus_type;
++
++	/* force drivers to async probe so I/O is possible in probe */
++	sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
++
++	return driver_register(&sdrv->driver);
++}
++EXPORT_SYMBOL_GPL(__ssam_device_driver_register);
++
++/**
++ * ssam_device_driver_unregister - Unregister a SSAM device driver.
++ * @sdrv: The driver to unregister.
++ */
++void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
++{
++	driver_unregister(&sdrv->driver);
++}
++EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
++
++
++static int ssam_remove_device(struct device *dev, void *_data)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++
++	if (is_ssam_device(dev))
++		ssam_device_remove(sdev);
++
++	return 0;
++}
++
++/**
++ * ssam_controller_remove_clients() - Remove SSAM client devices registered as
++ * direct children under the given controller.
++ * @ctrl: The controller to remove all direct clients for.
++ *
++ * Remove all SSAM client devices registered as direct children under the
++ * given controller. Note that this only accounts for direct children ot the
++ * controller device. This does not take care of any client devices where the
++ * parent device has been manually set before calling ssam_device_add. Refer
++ * to ssam_device_add()/ssam_device_remove() for more details on those cases.
++ *
++ * To avoid new devices being added in parallel to this call, the main
++ * controller lock (not statelock) must be held during this (and if
++ * necessary, any subsequent deinitialization) call.
++ */
++void ssam_controller_remove_clients(struct ssam_controller *ctrl)
++{
++	struct device *dev;
++
++	dev = ssam_controller_device(ctrl);
++	device_for_each_child_reverse(dev, NULL, ssam_remove_device);
++}
++
++
++/**
++ * ssam_bus_register() - Register and set-up the SSAM client device bus.
++ */
++int ssam_bus_register(void)
++{
++	return bus_register(&ssam_bus_type);
++}
++
++/**
++ * ssam_bus_unregister() - Unregister the SSAM client device bus.
++ */
++void ssam_bus_unregister(void)
++{
++	return bus_unregister(&ssam_bus_type);
++}
+diff --git a/drivers/misc/surface_aggregator/bus.h b/drivers/misc/surface_aggregator/bus.h
+new file mode 100644
+index 000000000000..7712baaed6a5
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/bus.h
+@@ -0,0 +1,27 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Surface System Aggregator Module bus and device integration.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_BUS_H
++#define _SURFACE_AGGREGATOR_BUS_H
++
++#include <linux/surface_aggregator/controller.h>
++
++#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
++
++void ssam_controller_remove_clients(struct ssam_controller *ctrl);
++
++int ssam_bus_register(void);
++void ssam_bus_unregister(void);
++
++#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++static inline void ssam_controller_remove_clients(struct ssam_controller *ctrl) {}
++static inline int ssam_bus_register(void) { return 0; }
++static inline void ssam_bus_unregister(void) {}
++
++#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
++#endif /* _SURFACE_AGGREGATOR_BUS_H */
+diff --git a/drivers/misc/surface_aggregator/clients/Kconfig b/drivers/misc/surface_aggregator/clients/Kconfig
+new file mode 100644
+index 000000000000..3c438cb3f0ca
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/Kconfig
+@@ -0,0 +1,151 @@
++# SPDX-License-Identifier: GPL-2.0+
++# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++
++config SURFACE_AGGREGATOR_CDEV
++	tristate "Surface System Aggregator Module User-Space Interface"
++	depends on SURFACE_AGGREGATOR
++	help
++	  Provides a misc-device interface to the Surface System Aggregator
++	  Module (SSAM) controller.
++
++	  This option provides a module (called surface_aggregator_cdev), that,
++	  when loaded, will add a client device (and its respective driver) to
++	  the SSAM controller. Said client device manages a misc-device
++	  interface (/dev/surface/aggregator), which can be used by user-space
++	  tools to directly communicate with the SSAM EC by sending requests and
++	  receiving the correspondign responses.
++
++	  The provided interface is intended for debugging and development only,
++	  and should not be used otherwise.
++
++config SURFACE_AGGREGATOR_REGISTRY
++	tristate "Surface System Aggregator Module Device Registry"
++	depends on SURFACE_AGGREGATOR_BUS
++	default m
++	help
++	  Device-registry and device-hubs for Surface System Aggregator Module
++	  (SSAM) devices.
++
++	  Provides a module and driver which act as device-registry for SSAM
++	  client devices that cannot be detected automatically, e.g. via ACPI.
++	  Such devices are instead provided via this registry and attached via
++	  device hubs, also provided in this module.
++
++	  Devices provided via this registry are:
++	  - performance / cooling mode device (all generations)
++	  - battery/AC devices (7th generation)
++	  - HID input devices (7th generation)
++
++	  Note that this module only provides the respective client devices.
++	  Drivers for these devices still need to be selected via the other
++	  options.
++
++config SURFACE_ACPI_NOTIFY
++	tristate "Surface ACPI Notify Driver"
++	depends on SURFACE_AGGREGATOR
++	default m
++	help
++	  Surface ACPI Notify (SAN) driver for Microsoft Surface devices.
++
++	  This driver provides support for the ACPI interface (called SAN) of
++	  the Surface System Aggregator Module (SSAM) EC. This interface is used
++	  on 5th- and 6th-generation Microsoft Surface devices (including
++	  Surface Pro 5 and 6, Surface Book 2, Surface Laptops 1 and 2, and in
++	  reduced functionality on the Surface Laptop 3) to execute SSAM
++	  requests directly from ACPI code, as well as receive SSAM events and
++	  turn them into ACPI notifications. It essentially acts as a
++	  translation layer between the SSAM controller and ACPI.
++
++	  Specifically, this driver may be needed for battery status reporting,
++	  thermal sensor access, and real-time clock information, depending on
++	  the Surface device in question.
++
++config SURFACE_BATTERY
++	tristate "Surface Battery Driver"
++	depends on SURFACE_AGGREGATOR_BUS
++	select POWER_SUPPLY
++	default m
++	help
++	  Driver for battery and AC-adapter devices connected/managed via the
++	  Surface System Aggregator Module (SSAM) EC.
++
++	  This driver provides battery-/AC-information and -status support for
++	  Surface devices where said data is not exposed via the standard ACPI
++	  devices. On those models (7th-generation, i.e. Surface pro 7, Surface
++	  Laptop 3, and Surface Book 3), battery-/AC-status and -information is
++	  instead handled directly via SSAM client devices.
++
++config SURFACE_DTX
++	tristate "Surface Detachment System Driver"
++	depends on SURFACE_AGGREGATOR
++	depends on INPUT
++	default m
++	help
++	  Driver for the Surface Book clipboard detachment system (DTX).
++
++	  On the Surface Book series devices, the display part containing the
++	  CPU (called the clipboard) can be detached from the base (containing a
++	  battery, the keyboard, and, optionally, a discrete GPU) by (if
++	  necessary) unlocking and opening the latch connecting both parts.
++
++	  This driver provides a user-space interface that can influence the
++	  behavior of this process, which includes the option to abort it in
++	  case the base is still in use or speed it up in case it is not.
++
++	  Note that this module can be built without support for the Surface
++	  Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
++	  some devices, specifically the Surface Book 3, will not be supported.
++
++config SURFACE_HID
++	tristate "Surface HID Transport Driver"
++	depends on SURFACE_AGGREGATOR_BUS
++	depends on HID
++	default m
++	help
++	  Transport driver for HID devices connected via the Surface System
++	  Aggregator Module (SSAM).
++
++	  This driver provides support for HID input devices (e.g. touchpad and
++	  keyboard) connected via SSAM. It is required for keyboard input on the
++	  Surface Laptop 1 and 2, as well as keyboard and touchpad input on the
++	  Surface Laptop 3 and Surface Book 3.
++
++	  Note that this module can be built without support for the Surface
++	  Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
++	  some devices, specifically the Surface Book 3 and Surface Laptop 3,
++	  will not be supported.
++
++config SURFACE_HOTPLUG
++	tristate "Surface Hot-Plug System Driver"
++	depends on SURFACE_AGGREGATOR
++	depends on SURFACE_ACPI_NOTIFY
++	depends on GPIO_SYSFS
++	default m
++	help
++	  Driver for the Surface discrete GPU (dGPU) hot-plug system.
++
++	  This driver manages the dGPU power on the Surface Books, including
++	  when hot-plugging it by detaching the clipboard (display part
++	  containing the CPU) from the base (containing the keyboard and dGPU)
++	  of the device when it is running. This driver also provides a
++	  user-space interface via which the dGPU power-state (on/off) can be
++	  set, allowing users to turn off the dGPU in order to reduce power
++	  consumption.
++
++config SURFACE_PERFMODE
++	tristate "Surface Performance-Mode Driver"
++	depends on SURFACE_AGGREGATOR_BUS
++	depends on SYSFS
++	default m
++	help
++	  Driver for the performance-/cooling-mode interface of Microsoft
++	  Surface devices.
++
++	  Microsoft Surface devices using the Surface System Aggregator Module
++	  (SSAM) can be switched between different performance modes. This,
++	  depending on the device, can influence their cooling behavior and may
++	  influence power limits, allowing users to choose between performance
++	  and higher power-draw, or lower power-draw and more silent operation.
++
++	  This driver provides a user-space interface (via sysfs) for
++	  controlling said mode via the corresponding client device.
+diff --git a/drivers/misc/surface_aggregator/clients/Makefile b/drivers/misc/surface_aggregator/clients/Makefile
+new file mode 100644
+index 000000000000..7320922ba755
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/Makefile
+@@ -0,0 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0+
++# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++
++obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV)		+= surface_aggregator_cdev.o
++obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY)	+= surface_aggregator_registry.o
++obj-$(CONFIG_SURFACE_ACPI_NOTIFY)		+= surface_acpi_notify.o
++obj-$(CONFIG_SURFACE_BATTERY)			+= surface_battery.o
++obj-$(CONFIG_SURFACE_DTX)			+= surface_dtx.o
++obj-$(CONFIG_SURFACE_HID)			+= surface_hid.o
++obj-$(CONFIG_SURFACE_HOTPLUG)			+= surface_hotplug.o
++obj-$(CONFIG_SURFACE_PERFMODE) 			+= surface_perfmode.o
+diff --git a/drivers/misc/surface_aggregator/clients/surface_acpi_notify.c b/drivers/misc/surface_aggregator/clients/surface_acpi_notify.c
+new file mode 100644
+index 000000000000..9010f3aafd28
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/surface_acpi_notify.c
+@@ -0,0 +1,884 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Driver for the Surface ACPI Notify (SAN) interface/shim.
++ *
++ * Translates communication from ACPI to Surface System Aggregator Module
++ * (SSAM/SAM) requests and back, specifically SAM-over-SSH. Translates SSAM
++ * events back to ACPI notifications. Allows handling of discrete GPU
++ * notifications sent from ACPI via the SAN interface by providing them to any
++ * registered external driver.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/jiffies.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/notifier.h>
++#include <linux/platform_device.h>
++#include <linux/rwsem.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_acpi_notify.h>
++
++
++struct san_data {
++	struct device *dev;
++	struct ssam_controller *ctrl;
++
++	struct acpi_connection_info info;
++
++	struct ssam_event_notifier nf_bat;
++	struct ssam_event_notifier nf_tmp;
++};
++
++#define to_san_data(ptr, member) \
++	container_of(ptr, struct san_data, member)
++
++
++/* -- dGPU notifier interface. ---------------------------------------------- */
++
++struct san_rqsg_if {
++	struct rw_semaphore lock;
++	struct device *dev;
++	struct blocking_notifier_head nh;
++};
++
++static struct san_rqsg_if san_rqsg_if = {
++	.lock = __RWSEM_INITIALIZER(san_rqsg_if.lock),
++	.dev = NULL,
++	.nh = BLOCKING_NOTIFIER_INIT(san_rqsg_if.nh),
++};
++
++static int san_set_rqsg_interface_device(struct device *dev)
++{
++	int status = 0;
++
++	down_write(&san_rqsg_if.lock);
++	if (!san_rqsg_if.dev && dev)
++		san_rqsg_if.dev = dev;
++	else
++		status = -EBUSY;
++	up_write(&san_rqsg_if.lock);
++
++	return status;
++}
++
++/**
++ * san_client_link() - Link client as consumer to SAN device.
++ * @client: The client to link.
++ *
++ * Sets up a device link between the provided client device as consumer and
++ * the SAN device as provider. This function can be used to ensure that the
++ * SAN interface has been set up and will be set up for as long as the driver
++ * of the client device is bound. This guarantees that, during that time, all
++ * dGPU events will be received by any registered notifier.
++ *
++ * The link will be automatically removed once the client device's driver is
++ * unbound.
++ *
++ * Return: Returns zero on success, %-ENXIO if the SAN interface has not been
++ * set up yet, and %-ENOMEM if device link creation failed.
++ */
++int san_client_link(struct device *client)
++{
++	const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
++	struct device_link *link;
++
++	down_read(&san_rqsg_if.lock);
++
++	if (!san_rqsg_if.dev) {
++		up_read(&san_rqsg_if.lock);
++		return -ENXIO;
++	}
++
++	link = device_link_add(client, san_rqsg_if.dev, flags);
++	if (!link) {
++		up_read(&san_rqsg_if.lock);
++		return -ENOMEM;
++	}
++
++	if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
++		up_read(&san_rqsg_if.lock);
++		return -ENXIO;
++	}
++
++	up_read(&san_rqsg_if.lock);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(san_client_link);
++
++/**
++ * san_dgpu_notifier_register() - Register a SAN dGPU notifier.
++ * @nb: The notifier-block to register.
++ *
++ * Registers a SAN dGPU notifier, receiving any new SAN dGPU events sent from
++ * ACPI. The registered notifier will be called with &struct san_dgpu_event
++ * as notifier data and the command ID of that event as notifier action.
++ */
++int san_dgpu_notifier_register(struct notifier_block *nb)
++{
++	return blocking_notifier_chain_register(&san_rqsg_if.nh, nb);
++}
++EXPORT_SYMBOL_GPL(san_dgpu_notifier_register);
++
++/**
++ * san_dgpu_notifier_unregister() - Unregister a SAN dGPU notifier.
++ * @nb: The notifier-block to unregister.
++ */
++int san_dgpu_notifier_unregister(struct notifier_block *nb)
++{
++	return blocking_notifier_chain_unregister(&san_rqsg_if.nh, nb);
++}
++EXPORT_SYMBOL_GPL(san_dgpu_notifier_unregister);
++
++static int san_dgpu_notifier_call(struct san_dgpu_event *evt)
++{
++	int ret;
++
++	ret = blocking_notifier_call_chain(&san_rqsg_if.nh, evt->command, evt);
++	return notifier_to_errno(ret);
++}
++
++
++/* -- ACPI _DSM event relay. ------------------------------------------------ */
++
++#define SAN_DSM_REVISION	0
++
++static const guid_t SAN_DSM_UUID =
++	GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
++		  0x48, 0x7c, 0x91, 0xab, 0x3c);
++
++enum san_dsm_event_fn {
++	SAN_DSM_EVENT_FN_BAT1_STAT = 0x03,
++	SAN_DSM_EVENT_FN_BAT1_INFO = 0x04,
++	SAN_DSM_EVENT_FN_ADP1_STAT = 0x05,
++	SAN_DSM_EVENT_FN_ADP1_INFO = 0x06,
++	SAN_DSM_EVENT_FN_BAT2_STAT = 0x07,
++	SAN_DSM_EVENT_FN_BAT2_INFO = 0x08,
++	SAN_DSM_EVENT_FN_THERMAL   = 0x09,
++	SAN_DSM_EVENT_FN_DPTF      = 0x0a,
++};
++
++enum sam_event_cid_bat {
++	SAM_EVENT_CID_BAT_BIX  = 0x15,
++	SAM_EVENT_CID_BAT_BST  = 0x16,
++	SAM_EVENT_CID_BAT_ADP  = 0x17,
++	SAM_EVENT_CID_BAT_PROT = 0x18,
++	SAM_EVENT_CID_BAT_DPTF = 0x4f,
++};
++
++enum sam_event_cid_tmp {
++	SAM_EVENT_CID_TMP_TRIP = 0x0b,
++};
++
++struct san_event_work {
++	struct delayed_work work;
++	struct device *dev;
++	struct ssam_event event;	// must be last
++};
++
++static int san_acpi_notify_event(struct device *dev, u64 func,
++				 union acpi_object *param)
++{
++	acpi_handle san = ACPI_HANDLE(dev);
++	union acpi_object *obj;
++	int status = 0;
++
++	if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, 1 << func))
++		return 0;
++
++	dev_dbg(dev, "notify event 0x%02llx\n", func);
++
++	obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
++				      func, param, ACPI_TYPE_BUFFER);
++	if (!obj)
++		return -EFAULT;
++
++	if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
++		dev_err(dev, "got unexpected result from _DSM\n");
++		status = -EPROTO;
++	}
++
++	ACPI_FREE(obj);
++	return status;
++}
++
++static int san_evt_bat_adp(struct device *dev, const struct ssam_event *event)
++{
++	int status;
++
++	status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_ADP1_STAT, NULL);
++	if (status)
++		return status;
++
++	/*
++	 * Ensure that the battery states get updated correctly.
++	 * When the battery is fully charged and an adapter is plugged in, it
++	 * sometimes is not updated correctly, instead showing it as charging.
++	 * Explicitly trigger battery updates to fix this.
++	 */
++
++	status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT1_STAT, NULL);
++	if (status)
++		return status;
++
++	return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT2_STAT, NULL);
++}
++
++static int san_evt_bat_bix(struct device *dev, const struct ssam_event *event)
++{
++	enum san_dsm_event_fn fn;
++
++	if (event->instance_id == 0x02)
++		fn = SAN_DSM_EVENT_FN_BAT2_INFO;
++	else
++		fn = SAN_DSM_EVENT_FN_BAT1_INFO;
++
++	return san_acpi_notify_event(dev, fn, NULL);
++}
++
++static int san_evt_bat_bst(struct device *dev, const struct ssam_event *event)
++{
++	enum san_dsm_event_fn fn;
++
++	if (event->instance_id == 0x02)
++		fn = SAN_DSM_EVENT_FN_BAT2_STAT;
++	else
++		fn = SAN_DSM_EVENT_FN_BAT1_STAT;
++
++	return san_acpi_notify_event(dev, fn, NULL);
++}
++
++static int san_evt_bat_dptf(struct device *dev, const struct ssam_event *event)
++{
++	union acpi_object payload;
++
++	/*
++	 * The Surface ACPI expects a buffer and not a package. It specifically
++	 * checks for ObjectType (Arg3) == 0x03. This will cause a warning in
++	 * acpica/nsarguments.c, but that warning can be safely ignored.
++	 */
++	payload.type = ACPI_TYPE_BUFFER;
++	payload.buffer.length = event->length;
++	payload.buffer.pointer = (u8 *)&event->data[0];
++
++	return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_DPTF, &payload);
++}
++
++static unsigned long san_evt_bat_delay(u8 cid)
++{
++	switch (cid) {
++	case SAM_EVENT_CID_BAT_ADP:
++		/*
++		 * Wait for battery state to update before signalling adapter
++		 * change.
++		 */
++		return msecs_to_jiffies(5000);
++
++	case SAM_EVENT_CID_BAT_BST:
++		/* Ensure we do not miss anything important due to caching. */
++		return msecs_to_jiffies(2000);
++
++	default:
++		return 0;
++	}
++}
++
++static bool san_evt_bat(const struct ssam_event *event, struct device *dev)
++{
++	int status;
++
++	switch (event->command_id) {
++	case SAM_EVENT_CID_BAT_BIX:
++		status = san_evt_bat_bix(dev, event);
++		break;
++
++	case SAM_EVENT_CID_BAT_BST:
++		status = san_evt_bat_bst(dev, event);
++		break;
++
++	case SAM_EVENT_CID_BAT_ADP:
++		status = san_evt_bat_adp(dev, event);
++		break;
++
++	case SAM_EVENT_CID_BAT_PROT:
++		/*
++		 * TODO: Implement support for battery protection status change
++		 *       event.
++		 */
++		return true;
++
++	case SAM_EVENT_CID_BAT_DPTF:
++		status = san_evt_bat_dptf(dev, event);
++		break;
++
++	default:
++		return false;
++	}
++
++	if (status)
++		dev_err(dev, "error handling power event (cid = %x)\n",
++			event->command_id);
++
++	return true;
++}
++
++static void san_evt_bat_workfn(struct work_struct *work)
++{
++	struct san_event_work *ev;
++
++	ev = container_of(work, struct san_event_work, work.work);
++	san_evt_bat(&ev->event, ev->dev);
++	kfree(ev);
++}
++
++static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
++			  const struct ssam_event *event)
++{
++	struct san_data *d = to_san_data(nf, nf_bat);
++	struct san_event_work *work;
++	unsigned long delay = san_evt_bat_delay(event->command_id);
++
++	if (delay == 0)
++		return san_evt_bat(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
++
++	work = kzalloc(sizeof(*work) + event->length, GFP_KERNEL);
++	if (!work)
++		return ssam_notifier_from_errno(-ENOMEM);
++
++	INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
++	work->dev = d->dev;
++
++	memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
++
++	schedule_delayed_work(&work->work, delay);
++	return SSAM_NOTIF_HANDLED;
++}
++
++static int san_evt_tmp_trip(struct device *dev, const struct ssam_event *event)
++{
++	union acpi_object param;
++
++	/*
++	 * The Surface ACPI expects an integer and not a package. This will
++	 * cause a warning in acpica/nsarguments.c, but that warning can be
++	 * safely ignored.
++	 */
++	param.type = ACPI_TYPE_INTEGER;
++	param.integer.value = event->instance_id;
++
++	return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_THERMAL, &param);
++}
++
++static bool san_evt_tmp(const struct ssam_event *event, struct device *dev)
++{
++	int status;
++
++	switch (event->command_id) {
++	case SAM_EVENT_CID_TMP_TRIP:
++		status = san_evt_tmp_trip(dev, event);
++		break;
++
++	default:
++		return false;
++	}
++
++	if (status) {
++		dev_err(dev, "error handling thermal event (cid = %x)\n",
++			event->command_id);
++	}
++
++	return true;
++}
++
++static u32 san_evt_tmp_nf(struct ssam_event_notifier *nf,
++			  const struct ssam_event *event)
++{
++	struct san_data *d = to_san_data(nf, nf_tmp);
++
++	return san_evt_tmp(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
++}
++
++
++/* -- ACPI GSB OperationRegion handler -------------------------------------- */
++
++struct gsb_data_in {
++	u8 cv;
++} __packed;
++
++struct gsb_data_rqsx {
++	u8 cv;				// command value (san_gsb_request_cv)
++	u8 tc;				// target category
++	u8 tid;				// target ID
++	u8 iid;				// instance ID
++	u8 snc;				// expect-response-flag?
++	u8 cid;				// command ID
++	u16 cdl;			// payload length
++	u8 pld[];			// payload
++} __packed;
++
++struct gsb_data_etwl {
++	u8 cv;				// command value (should be 0x02)
++	u8 etw3;			// unknown
++	u8 etw4;			// unknown
++	u8 msg[];			// error message (ASCIIZ)
++} __packed;
++
++struct gsb_data_out {
++	u8 status;			// _SSH communication status
++	u8 len;				// _SSH payload length
++	u8 pld[];			// _SSH payload
++} __packed;
++
++union gsb_buffer_data {
++	struct gsb_data_in   in;	// common input
++	struct gsb_data_rqsx rqsx;	// RQSX input
++	struct gsb_data_etwl etwl;	// ETWL input
++	struct gsb_data_out  out;	// output
++};
++
++struct gsb_buffer {
++	u8 status;			// GSB AttribRawProcess status
++	u8 len;				// GSB AttribRawProcess length
++	union gsb_buffer_data data;
++} __packed;
++
++#define SAN_GSB_MAX_RQSX_PAYLOAD  (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
++#define SAN_GSB_MAX_RESPONSE	  (U8_MAX - 2 - sizeof(struct gsb_data_out))
++
++#define SAN_GSB_COMMAND		0
++
++enum san_gsb_request_cv {
++	SAN_GSB_REQUEST_CV_RQST = 0x01,
++	SAN_GSB_REQUEST_CV_ETWL = 0x02,
++	SAN_GSB_REQUEST_CV_RQSG = 0x03,
++};
++
++#define SAN_REQUEST_NUM_TRIES	5
++
++static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *b)
++{
++	struct gsb_data_etwl *etwl = &b->data.etwl;
++
++	if (b->len < sizeof(struct gsb_data_etwl)) {
++		dev_err(d->dev, "invalid ETWL package (len = %d)\n", b->len);
++		return AE_OK;
++	}
++
++	dev_err(d->dev, "ETWL(0x%02x, 0x%02x): %.*s\n", etwl->etw3, etwl->etw4,
++		(unsigned int)(b->len - sizeof(struct gsb_data_etwl)),
++		(char *)etwl->msg);
++
++	// indicate success
++	b->status = 0x00;
++	b->len = 0x00;
++
++	return AE_OK;
++}
++
++static struct gsb_data_rqsx *san_validate_rqsx(struct device *dev,
++		const char *type, struct gsb_buffer *b)
++{
++	struct gsb_data_rqsx *rqsx = &b->data.rqsx;
++
++	if (b->len < sizeof(struct gsb_data_rqsx)) {
++		dev_err(dev, "invalid %s package (len = %d)\n", type, b->len);
++		return NULL;
++	}
++
++	if (get_unaligned(&rqsx->cdl) != b->len - sizeof(struct gsb_data_rqsx)) {
++		dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
++			type, b->len, get_unaligned(&rqsx->cdl));
++		return NULL;
++	}
++
++	if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
++		dev_err(dev, "payload for %s package too large (cdl = %d)\n",
++			type, get_unaligned(&rqsx->cdl));
++		return NULL;
++	}
++
++	return rqsx;
++}
++
++static void gsb_rqsx_response_error(struct gsb_buffer *gsb, int status)
++{
++	gsb->status = 0x00;
++	gsb->len = 0x02;
++	gsb->data.out.status = (u8)(-status);
++	gsb->data.out.len = 0x00;
++}
++
++static void gsb_rqsx_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
++{
++	gsb->status = 0x00;
++	gsb->len = len + 2;
++	gsb->data.out.status = 0x00;
++	gsb->data.out.len = len;
++
++	if (len)
++		memcpy(&gsb->data.out.pld[0], ptr, len);
++}
++
++static acpi_status san_rqst_fixup_suspended(struct san_data *d,
++					    struct ssam_request *rqst,
++					    struct gsb_buffer *gsb)
++{
++	if (rqst->target_category == SSAM_SSH_TC_BAS && rqst->command_id == 0x0D) {
++		u8 base_state = 1;
++
++		/* Base state quirk:
++		 * The base state may be queried from ACPI when the EC is still
++		 * suspended. In this case it will return '-EPERM'. This query
++		 * will only be triggered from the ACPI lid GPE interrupt, thus
++		 * we are either in laptop or studio mode (base status 0x01 or
++		 * 0x02). Furthermore, we will only get here if the device (and
++		 * EC) have been suspended.
++		 *
++		 * We now assume that the device is in laptop mode (0x01). This
++		 * has the drawback that it will wake the device when unfolding
++		 * it in studio mode, but it also allows us to avoid actively
++		 * waiting for the EC to wake up, which may incur a notable
++		 * delay.
++		 */
++
++		dev_dbg(d->dev, "rqst: fixup: base-state quirk\n");
++
++		gsb_rqsx_response_success(gsb, &base_state, sizeof(base_state));
++		return AE_OK;
++	}
++
++	gsb_rqsx_response_error(gsb, -ENXIO);
++	return AE_OK;
++}
++
++static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
++{
++	u8 rspbuf[SAN_GSB_MAX_RESPONSE];
++	struct gsb_data_rqsx *gsb_rqst;
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++	int status = 0;
++
++	gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
++	if (!gsb_rqst)
++		return AE_OK;
++
++	rqst.target_category = gsb_rqst->tc;
++	rqst.target_id = gsb_rqst->tid;
++	rqst.command_id = gsb_rqst->cid;
++	rqst.instance_id = gsb_rqst->iid;
++	rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
++	rqst.length = get_unaligned(&gsb_rqst->cdl);
++	rqst.payload = &gsb_rqst->pld[0];
++
++	rsp.capacity = ARRAY_SIZE(rspbuf);
++	rsp.length = 0;
++	rsp.pointer = &rspbuf[0];
++
++	// handle suspended device
++	if (d->dev->power.is_suspended) {
++		dev_warn(d->dev, "rqst: device is suspended, not executing\n");
++		return san_rqst_fixup_suspended(d, &rqst, buffer);
++	}
++
++	status = ssam_retry(ssam_request_sync_onstack, SAN_REQUEST_NUM_TRIES,
++			    d->ctrl, &rqst, &rsp, SAN_GSB_MAX_RQSX_PAYLOAD);
++
++	if (!status) {
++		gsb_rqsx_response_success(buffer, rsp.pointer, rsp.length);
++	} else {
++		dev_err(d->dev, "rqst: failed with error %d\n", status);
++		gsb_rqsx_response_error(buffer, status);
++	}
++
++	return AE_OK;
++}
++
++static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
++{
++	struct gsb_data_rqsx *gsb_rqsg;
++	struct san_dgpu_event evt;
++	int status;
++
++	gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
++	if (!gsb_rqsg)
++		return AE_OK;
++
++	evt.category = gsb_rqsg->tc;
++	evt.target = gsb_rqsg->tid;
++	evt.command = gsb_rqsg->cid;
++	evt.instance = gsb_rqsg->iid;
++	evt.length = get_unaligned(&gsb_rqsg->cdl);
++	evt.payload = &gsb_rqsg->pld[0];
++
++	status = san_dgpu_notifier_call(&evt);
++	if (!status) {
++		gsb_rqsx_response_success(buffer, NULL, 0);
++	} else {
++		dev_err(d->dev, "rqsg: failed with error %d\n", status);
++		gsb_rqsx_response_error(buffer, status);
++	}
++
++	return AE_OK;
++}
++
++static acpi_status san_opreg_handler(u32 function,
++		acpi_physical_address command, u32 bits, u64 *value64,
++		void *opreg_context, void *region_context)
++{
++	struct san_data *d = to_san_data(opreg_context, info);
++	struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
++	int accessor_type = (function & 0xFFFF0000) >> 16;
++
++	if (command != SAN_GSB_COMMAND) {
++		dev_warn(d->dev, "unsupported command: 0x%02llx\n", command);
++		return AE_OK;
++	}
++
++	if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
++		dev_err(d->dev, "invalid access type: 0x%02x\n", accessor_type);
++		return AE_OK;
++	}
++
++	// buffer must at least contain the command-value
++	if (buffer->len == 0) {
++		dev_err(d->dev, "request-package too small\n");
++		return AE_OK;
++	}
++
++	switch (buffer->data.in.cv) {
++	case SAN_GSB_REQUEST_CV_RQST:
++		return san_rqst(d, buffer);
++
++	case SAN_GSB_REQUEST_CV_ETWL:
++		return san_etwl(d, buffer);
++
++	case SAN_GSB_REQUEST_CV_RQSG:
++		return san_rqsg(d, buffer);
++
++	default:
++		dev_warn(d->dev, "unsupported SAN0 request (cv: 0x%02x)\n",
++			 buffer->data.in.cv);
++		return AE_OK;
++	}
++}
++
++
++/* -- Driver setup. --------------------------------------------------------- */
++
++static int san_events_register(struct platform_device *pdev)
++{
++	struct san_data *d = platform_get_drvdata(pdev);
++	int status;
++
++	d->nf_bat.base.priority = 1;
++	d->nf_bat.base.fn = san_evt_bat_nf;
++	d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
++	d->nf_bat.event.id.instance = 0;
++	d->nf_bat.event.mask = SSAM_EVENT_MASK_TARGET;
++	d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
++
++	d->nf_tmp.base.priority = 1;
++	d->nf_tmp.base.fn = san_evt_tmp_nf;
++	d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
++	d->nf_tmp.event.id.instance = 0;
++	d->nf_tmp.event.mask = SSAM_EVENT_MASK_TARGET;
++	d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
++
++	status = ssam_notifier_register(d->ctrl, &d->nf_bat);
++	if (status)
++		return status;
++
++	status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
++	if (status)
++		ssam_notifier_unregister(d->ctrl, &d->nf_bat);
++
++	return status;
++}
++
++static void san_events_unregister(struct platform_device *pdev)
++{
++	struct san_data *d = platform_get_drvdata(pdev);
++
++	ssam_notifier_unregister(d->ctrl, &d->nf_bat);
++	ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
++}
++
++#define san_consumer_printk(level, dev, handle, fmt, ...)			\
++do {										\
++	char *path = "<error getting consumer path>";				\
++	struct acpi_buffer buffer = {						\
++		.length = ACPI_ALLOCATE_BUFFER,					\
++		.pointer = NULL,						\
++	};									\
++										\
++	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))	\
++		path = buffer.pointer;						\
++										\
++	dev_##level(dev, "[%s]: " fmt, path, ##__VA_ARGS__);			\
++	kfree(buffer.pointer);							\
++} while (0)
++
++#define san_consumer_dbg(dev, handle, fmt, ...) \
++	san_consumer_printk(dbg, dev, handle, fmt, ##__VA_ARGS__)
++
++#define san_consumer_warn(dev, handle, fmt, ...) \
++	san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__)
++
++static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle)
++{
++	struct acpi_handle_list dep_devices;
++	acpi_handle supplier = ACPI_HANDLE(&pdev->dev);
++	acpi_status status;
++	int i;
++
++	if (!acpi_has_method(handle, "_DEP"))
++		return false;
++
++	status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
++	if (ACPI_FAILURE(status)) {
++		san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n");
++		return false;
++	}
++
++	for (i = 0; i < dep_devices.count; i++) {
++		if (dep_devices.handles[i] == supplier)
++			return true;
++	}
++
++	return false;
++}
++
++static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
++				      void *context, void **rv)
++{
++	const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER;
++	struct platform_device *pdev = context;
++	struct acpi_device *adev;
++	struct device_link *link;
++
++	if (!is_san_consumer(pdev, handle))
++		return AE_OK;
++
++	// ignore ACPI devices that are not present
++	if (acpi_bus_get_device(handle, &adev) != 0)
++		return AE_OK;
++
++	san_consumer_dbg(&pdev->dev, handle, "creating device link\n");
++
++	// try to set up device links, ignore but log errors
++	link = device_link_add(&adev->dev, &pdev->dev, flags);
++	if (!link) {
++		san_consumer_warn(&pdev->dev, handle,
++				  "failed to create device link\n");
++		return AE_OK;
++	}
++
++	return AE_OK;
++}
++
++static int san_consumer_links_setup(struct platform_device *pdev)
++{
++	acpi_status status;
++
++	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
++				     ACPI_UINT32_MAX, san_consumer_setup, NULL,
++				     pdev, NULL);
++
++	return status ? -EFAULT : 0;
++}
++
++static int san_probe(struct platform_device *pdev)
++{
++	acpi_handle san = ACPI_HANDLE(&pdev->dev);
++	struct ssam_controller *ctrl;
++	struct san_data *data;
++	acpi_status astatus;
++	int status;
++
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status)
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++
++	status = san_consumer_links_setup(pdev);
++	if (status)
++		return status;
++
++	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	data->dev = &pdev->dev;
++	data->ctrl = ctrl;
++
++	platform_set_drvdata(pdev, data);
++
++	astatus = acpi_install_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
++			&san_opreg_handler, NULL, &data->info);
++	if (ACPI_FAILURE(astatus))
++		return -ENXIO;
++
++	status = san_events_register(pdev);
++	if (status)
++		goto err_enable_events;
++
++	status = san_set_rqsg_interface_device(&pdev->dev);
++	if (status)
++		goto err_install_dev;
++
++	acpi_walk_dep_device_list(san);
++	return 0;
++
++err_install_dev:
++	san_events_unregister(pdev);
++err_enable_events:
++	acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
++					  &san_opreg_handler);
++	return status;
++}
++
++static int san_remove(struct platform_device *pdev)
++{
++	acpi_handle san = ACPI_HANDLE(&pdev->dev);
++
++	san_set_rqsg_interface_device(NULL);
++	acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
++					  &san_opreg_handler);
++	san_events_unregister(pdev);
++
++	/*
++	 * We have unregistered our event sources. Now we need to ensure that
++	 * all delayed works they may have spawned are run to completion.
++	 */
++	flush_scheduled_work();
++
++	return 0;
++}
++
++static const struct acpi_device_id san_match[] = {
++	{ "MSHW0091" },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, san_match);
++
++static struct platform_driver surface_acpi_notify = {
++	.probe = san_probe,
++	.remove = san_remove,
++	.driver = {
++		.name = "surface_acpi_notify",
++		.acpi_match_table = san_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(surface_acpi_notify);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/clients/surface_aggregator_cdev.c b/drivers/misc/surface_aggregator/clients/surface_aggregator_cdev.c
+new file mode 100644
+index 000000000000..f5e81cd67357
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/surface_aggregator_cdev.c
+@@ -0,0 +1,299 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
++ * misc device. Intended for debugging and development.
++ *
++ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/kref.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/rwsem.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#include <linux/surface_aggregator/cdev.h>
++#include <linux/surface_aggregator/controller.h>
++
++#define SSAM_CDEV_DEVICE_NAME	"surface_aggregator_cdev"
++
++struct ssam_cdev {
++	struct kref kref;
++	struct rw_semaphore lock;
++	struct ssam_controller *ctrl;
++	struct miscdevice mdev;
++};
++
++static void __ssam_cdev_release(struct kref *kref)
++{
++	kfree(container_of(kref, struct ssam_cdev, kref));
++}
++
++static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev)
++{
++	if (cdev)
++		kref_get(&cdev->kref);
++
++	return cdev;
++}
++
++static void ssam_cdev_put(struct ssam_cdev *cdev)
++{
++	if (cdev)
++		kref_put(&cdev->kref, __ssam_cdev_release);
++}
++
++static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
++{
++	struct miscdevice *mdev = filp->private_data;
++	struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
++
++	filp->private_data = ssam_cdev_get(cdev);
++	return stream_open(inode, filp);
++}
++
++static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
++{
++	ssam_cdev_put(filp->private_data);
++	return 0;
++}
++
++static long ssam_cdev_request(struct ssam_cdev *cdev, unsigned long arg)
++{
++	struct ssam_cdev_request __user *r;
++	struct ssam_cdev_request rqst;
++	struct ssam_request spec;
++	struct ssam_response rsp;
++	const void __user *plddata;
++	void __user *rspdata;
++	int status = 0, ret = 0, tmp;
++
++	r = (struct ssam_cdev_request __user *)arg;
++	ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
++	if (ret)
++		goto out;
++
++	plddata = u64_to_user_ptr(rqst.payload.data);
++	rspdata = u64_to_user_ptr(rqst.response.data);
++
++	// setup basic request fields
++	spec.target_category = rqst.target_category;
++	spec.target_id = rqst.target_id;
++	spec.command_id = rqst.command_id;
++	spec.instance_id = rqst.instance_id;
++	spec.flags = rqst.flags;
++	spec.length = rqst.payload.length;
++	spec.payload = NULL;
++
++	rsp.capacity = rqst.response.length;
++	rsp.length = 0;
++	rsp.pointer = NULL;
++
++	// get request payload from user-space
++	if (spec.length) {
++		if (!plddata) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		spec.payload = kzalloc(spec.length, GFP_KERNEL);
++		if (!spec.payload) {
++			status = -ENOMEM;
++			ret = -EFAULT;
++			goto out;
++		}
++
++		if (copy_from_user((void *)spec.payload, plddata, spec.length)) {
++			ret = -EFAULT;
++			goto out;
++		}
++	}
++
++	// allocate response buffer
++	if (rsp.capacity) {
++		if (!rspdata) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
++		if (!rsp.pointer) {
++			status = -ENOMEM;
++			ret = -EFAULT;
++			goto out;
++		}
++	}
++
++	// perform request
++	status = ssam_request_sync(cdev->ctrl, &spec, &rsp);
++	if (status)
++		goto out;
++
++	// copy response to user-space
++	if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length))
++		ret = -EFAULT;
++
++out:
++	// always try to set response-length and status
++	tmp = put_user(rsp.length, &r->response.length);
++	if (tmp)
++		ret = tmp;
++
++	tmp = put_user(status, &r->status);
++	if (tmp)
++		ret = tmp;
++
++	// cleanup
++	kfree(spec.payload);
++	kfree(rsp.pointer);
++
++	return ret;
++}
++
++static long __ssam_cdev_device_ioctl(struct ssam_cdev *cdev, unsigned int cmd,
++				     unsigned long arg)
++{
++	switch (cmd) {
++	case SSAM_CDEV_REQUEST:
++		return ssam_cdev_request(cdev, arg);
++
++	default:
++		return -ENOTTY;
++	}
++}
++
++static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd,
++				   unsigned long arg)
++{
++	struct ssam_cdev *cdev = file->private_data;
++	long status;
++
++	// ensure that controller is valid for as long as we need it
++	if (down_read_killable(&cdev->lock))
++		return -ERESTARTSYS;
++
++	if (!cdev->ctrl) {
++		up_read(&cdev->lock);
++		return -ENODEV;
++	}
++
++	status = __ssam_cdev_device_ioctl(cdev, cmd, arg);
++
++	up_read(&cdev->lock);
++	return status;
++}
++
++static const struct file_operations ssam_controller_fops = {
++	.owner          = THIS_MODULE,
++	.open           = ssam_cdev_device_open,
++	.release        = ssam_cdev_device_release,
++	.unlocked_ioctl = ssam_cdev_device_ioctl,
++	.compat_ioctl   = ssam_cdev_device_ioctl,
++	.llseek         = noop_llseek,
++};
++
++static int ssam_dbg_device_probe(struct platform_device *pdev)
++{
++	struct ssam_controller *ctrl;
++	struct ssam_cdev *cdev;
++	int status;
++
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status)
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++
++	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
++	if (!cdev)
++		return -ENOMEM;
++
++	kref_init(&cdev->kref);
++	init_rwsem(&cdev->lock);
++	cdev->ctrl = ctrl;
++
++	cdev->mdev.parent   = &pdev->dev;
++	cdev->mdev.minor    = MISC_DYNAMIC_MINOR;
++	cdev->mdev.name     = "surface_aggregator";
++	cdev->mdev.nodename = "surface/aggregator";
++	cdev->mdev.fops     = &ssam_controller_fops;
++
++	status = misc_register(&cdev->mdev);
++	if (status) {
++		kfree(cdev);
++		return status;
++	}
++
++	platform_set_drvdata(pdev, cdev);
++	return 0;
++}
++
++static int ssam_dbg_device_remove(struct platform_device *pdev)
++{
++	struct ssam_cdev *cdev = platform_get_drvdata(pdev);
++
++	misc_deregister(&cdev->mdev);
++
++	/*
++	 * The controller is only guaranteed to be valid for as long as the
++	 * driver is bound. Remove controller so that any lingering open files
++	 * cannot access it any more after we're gone.
++	 */
++	down_write(&cdev->lock);
++	cdev->ctrl = NULL;
++	up_write(&cdev->lock);
++
++	ssam_cdev_put(cdev);
++	return 0;
++}
++
++static struct platform_device *ssam_cdev_device;
++
++static struct platform_driver ssam_cdev_driver = {
++	.probe = ssam_dbg_device_probe,
++	.remove = ssam_dbg_device_remove,
++	.driver = {
++		.name = SSAM_CDEV_DEVICE_NAME,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++static int __init ssam_debug_init(void)
++{
++	int status;
++
++	ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME,
++						 PLATFORM_DEVID_NONE);
++	if (!ssam_cdev_device)
++		return -ENOMEM;
++
++	status = platform_device_add(ssam_cdev_device);
++	if (status)
++		goto err_device;
++
++	status = platform_driver_register(&ssam_cdev_driver);
++	if (status)
++		goto err_driver;
++
++	return 0;
++
++err_driver:
++	platform_device_del(ssam_cdev_device);
++err_device:
++	platform_device_put(ssam_cdev_device);
++	return status;
++}
++module_init(ssam_debug_init);
++
++static void __exit ssam_debug_exit(void)
++{
++	platform_driver_unregister(&ssam_cdev_driver);
++	platform_device_unregister(ssam_cdev_device);
++}
++module_exit(ssam_debug_exit);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/clients/surface_aggregator_registry.c b/drivers/misc/surface_aggregator/clients/surface_aggregator_registry.c
+new file mode 100644
+index 000000000000..7499c6d107cb
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/surface_aggregator_registry.c
+@@ -0,0 +1,652 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface System Aggregator Module (SSAM) client device registry.
++ *
++ * Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that
++ * cannot be auto-detected. Provides device-hubs for these devices.
++ *
++ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/notifier.h>
++#include <linux/platform_device.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
++
++
++/* -- Device registry. ------------------------------------------------------ */
++
++static const struct software_node ssam_node_root = {
++	.name = "ssam_platform_hub",
++};
++
++static const struct software_node ssam_node_hub_main = {
++	.name = "ssam:00:00:01:00:00",
++	.parent = &ssam_node_root,
++};
++
++static const struct software_node ssam_node_hub_base = {
++	.name = "ssam:00:00:02:00:00",
++	.parent = &ssam_node_root,
++};
++
++static const struct software_node ssam_node_bat_ac = {
++	.name = "ssam:01:02:01:01:01",
++	.parent = &ssam_node_hub_main,
++};
++
++static const struct software_node ssam_node_bat_main = {
++	.name = "ssam:01:02:01:01:00",
++	.parent = &ssam_node_hub_main,
++};
++
++static const struct software_node ssam_node_bat_sb3base = {
++	.name = "ssam:01:02:02:01:00",
++	.parent = &ssam_node_hub_base,
++};
++
++static const struct software_node ssam_node_tmp_perf = {
++	.name = "ssam:01:03:01:00:01",
++	.parent = &ssam_node_hub_main,
++};
++
++static const struct software_node ssam_node_bas_dtx = {
++	.name = "ssam:01:11:01:00:00",
++	.parent = &ssam_node_hub_main,
++};
++
++static const struct software_node ssam_node_hid_main_keyboard = {
++	.name = "ssam:01:15:02:01:00",
++	.parent = &ssam_node_hub_main,
++};
++
++static const struct software_node ssam_node_hid_main_touchpad = {
++	.name = "ssam:01:15:02:03:00",
++	.parent = &ssam_node_hub_main,
++};
++
++static const struct software_node ssam_node_hid_main_iid5 = {
++	.name = "ssam:01:15:02:05:00",
++	.parent = &ssam_node_hub_main,
++};
++
++static const struct software_node ssam_node_hid_base_keyboard = {
++	.name = "ssam:01:15:02:01:00",
++	.parent = &ssam_node_hub_base,
++};
++
++static const struct software_node ssam_node_hid_base_touchpad = {
++	.name = "ssam:01:15:02:03:00",
++	.parent = &ssam_node_hub_base,
++};
++
++static const struct software_node ssam_node_hid_base_iid5 = {
++	.name = "ssam:01:15:02:05:00",
++	.parent = &ssam_node_hub_base,
++};
++
++static const struct software_node ssam_node_hid_base_iid6 = {
++	.name = "ssam:01:15:02:06:00",
++	.parent = &ssam_node_hub_base,
++};
++
++
++static const struct software_node *ssam_node_group_sb2[] = {
++	&ssam_node_root,
++	&ssam_node_hub_main,
++	&ssam_node_tmp_perf,
++	NULL,
++};
++
++static const struct software_node *ssam_node_group_sb3[] = {
++	&ssam_node_root,
++	&ssam_node_hub_main,
++	&ssam_node_hub_base,
++	&ssam_node_tmp_perf,
++	&ssam_node_bat_ac,
++	&ssam_node_bat_main,
++	&ssam_node_bat_sb3base,
++	&ssam_node_hid_base_keyboard,
++	&ssam_node_hid_base_touchpad,
++	&ssam_node_hid_base_iid5,
++	&ssam_node_hid_base_iid6,
++	&ssam_node_bas_dtx,
++	NULL,
++};
++
++static const struct software_node *ssam_node_group_sl1[] = {
++	&ssam_node_root,
++	&ssam_node_hub_main,
++	&ssam_node_tmp_perf,
++	NULL,
++};
++
++static const struct software_node *ssam_node_group_sl2[] = {
++	&ssam_node_root,
++	&ssam_node_hub_main,
++	&ssam_node_tmp_perf,
++	NULL,
++};
++
++static const struct software_node *ssam_node_group_sl3[] = {
++	&ssam_node_root,
++	&ssam_node_hub_main,
++	&ssam_node_tmp_perf,
++	&ssam_node_bat_ac,
++	&ssam_node_bat_main,
++	&ssam_node_hid_main_keyboard,
++	&ssam_node_hid_main_touchpad,
++	&ssam_node_hid_main_iid5,
++	NULL,
++};
++
++static const struct software_node *ssam_node_group_sp5[] = {
++	&ssam_node_root,
++	&ssam_node_hub_main,
++	&ssam_node_tmp_perf,
++	NULL,
++};
++
++static const struct software_node *ssam_node_group_sp6[] = {
++	&ssam_node_root,
++	&ssam_node_hub_main,
++	&ssam_node_tmp_perf,
++	NULL,
++};
++
++static const struct software_node *ssam_node_group_sp7[] = {
++	&ssam_node_root,
++	&ssam_node_hub_main,
++	&ssam_node_tmp_perf,
++	&ssam_node_bat_ac,
++	&ssam_node_bat_main,
++	NULL,
++};
++
++
++/* -- Device registry helper functions. ------------------------------------- */
++
++static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
++{
++	u8 d, tc, tid, iid, fn;
++	int n;
++
++	n = sscanf(str, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
++	if (n != 5)
++		return -EINVAL;
++
++	uid->domain = d;
++	uid->category = tc;
++	uid->target = tid;
++	uid->instance = iid;
++	uid->function = fn;
++
++	return 0;
++}
++
++static int ssam_hub_remove_devices_fn(struct device *dev, void *data)
++{
++	if (!is_ssam_device(dev))
++		return 0;
++
++	ssam_device_remove(to_ssam_device(dev));
++	return 0;
++}
++
++static void ssam_hub_remove_devices(struct device *parent)
++{
++	device_for_each_child_reverse(parent, NULL, ssam_hub_remove_devices_fn);
++}
++
++static int ssam_hub_add_device(struct device *parent,
++			       struct ssam_controller *ctrl,
++			       struct fwnode_handle *node)
++{
++	struct ssam_device_uid uid;
++	struct ssam_device *sdev;
++	int status;
++
++	status = ssam_uid_from_string(fwnode_get_name(node), &uid);
++	if (status)
++		return -ENODEV;
++
++	sdev = ssam_device_alloc(ctrl, uid);
++	if (!sdev)
++		return -ENOMEM;
++
++	sdev->dev.parent = parent;
++	sdev->dev.fwnode = node;
++
++	status = ssam_device_add(sdev);
++	if (status)
++		ssam_device_put(sdev);
++
++	return status;
++}
++
++static int ssam_hub_add_devices(struct device *parent,
++				struct ssam_controller *ctrl,
++				struct fwnode_handle *node)
++{
++	struct fwnode_handle *child;
++	int status;
++
++	fwnode_for_each_child_node(node, child) {
++		status = ssam_hub_add_device(parent, ctrl, child);
++		if (status && status != -ENODEV)
++			goto err;
++	}
++
++	return 0;
++err:
++	ssam_hub_remove_devices(parent);
++	return status;
++}
++
++
++/* -- SSAM main-hub driver. ------------------------------------------------- */
++
++static int ssam_hub_probe(struct ssam_device *sdev)
++{
++	struct fwnode_handle *node = dev_fwnode(&sdev->dev);
++
++	if (!node)
++		return -ENODEV;
++
++	return ssam_hub_add_devices(&sdev->dev, sdev->ctrl, node);
++}
++
++static void ssam_hub_remove(struct ssam_device *sdev)
++{
++	ssam_hub_remove_devices(&sdev->dev);
++}
++
++static const struct ssam_device_id ssam_hub_match[] = {
++	{ SSAM_VDEV(HUB, 0x01, 0x00, 0x00) },
++	{ },
++};
++
++static struct ssam_device_driver ssam_hub_driver = {
++	.probe = ssam_hub_probe,
++	.remove = ssam_hub_remove,
++	.match_table = ssam_hub_match,
++	.driver = {
++		.name = "surface_aggregator_device_hub",
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++/* -- SSAM base-hub driver. ------------------------------------------------- */
++
++enum ssam_base_hub_state {
++	SSAM_BASE_HUB_UNINITIALIZED,
++	SSAM_BASE_HUB_CONNECTED,
++	SSAM_BASE_HUB_DISCONNECTED,
++};
++
++struct ssam_base_hub {
++	struct ssam_device *sdev;
++
++	struct mutex lock;
++	enum ssam_base_hub_state state;
++
++	struct ssam_event_notifier notif;
++};
++
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0d,
++	.instance_id     = 0x00,
++});
++
++#define SSAM_BAS_OPMODE_TABLET		0x00
++#define SSAM_EVENT_BAS_CID_CONNECTION	0x0c
++
++static int ssam_base_hub_query_state(struct ssam_device *sdev,
++				     enum ssam_base_hub_state *state)
++{
++	u8 opmode;
++	int status;
++
++	status = ssam_bas_query_opmode(sdev->ctrl, &opmode);
++	if (status < 0) {
++		dev_err(&sdev->dev, "failed to query base state: %d\n", status);
++		return status;
++	}
++
++	if (opmode != SSAM_BAS_OPMODE_TABLET)
++		*state = SSAM_BASE_HUB_CONNECTED;
++	else
++		*state = SSAM_BASE_HUB_DISCONNECTED;
++
++	return 0;
++}
++
++
++static ssize_t ssam_base_hub_state_show(struct device *dev,
++					struct device_attribute *attr,
++					char *buf)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++	struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
++	bool connected;
++
++	mutex_lock(&hub->lock);
++	connected = hub->state == SSAM_BASE_HUB_CONNECTED;
++	mutex_unlock(&hub->lock);
++
++	return snprintf(buf, PAGE_SIZE - 1, "%d\n", connected);
++}
++
++static struct device_attribute ssam_base_hub_attr_state =
++	__ATTR(state, 0444, ssam_base_hub_state_show, NULL);
++
++static struct attribute *ssam_base_hub_attrs[] = {
++	&ssam_base_hub_attr_state.attr,
++	NULL,
++};
++
++const struct attribute_group ssam_base_hub_group = {
++	.attrs = ssam_base_hub_attrs,
++};
++
++
++static int ssam_base_hub_update(struct ssam_device *sdev,
++				enum ssam_base_hub_state new)
++{
++	struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
++	struct fwnode_handle *node = dev_fwnode(&sdev->dev);
++	int status = 0;
++
++	mutex_lock(&hub->lock);
++	if (hub->state == new) {
++		mutex_unlock(&hub->lock);
++		return 0;
++	}
++	hub->state = new;
++
++	if (hub->state == SSAM_BASE_HUB_CONNECTED)
++		status = ssam_hub_add_devices(&sdev->dev, sdev->ctrl, node);
++
++	if (hub->state != SSAM_BASE_HUB_CONNECTED || status)
++		ssam_hub_remove_devices(&sdev->dev);
++
++	mutex_unlock(&hub->lock);
++
++	if (status) {
++		dev_err(&sdev->dev, "failed to update base-hub devices: %d\n",
++			status);
++	}
++
++	return status;
++}
++
++static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf,
++			       const struct ssam_event *event)
++{
++	struct ssam_base_hub *hub;
++	struct ssam_device *sdev;
++	enum ssam_base_hub_state new;
++
++	hub = container_of(nf, struct ssam_base_hub, notif);
++	sdev = hub->sdev;
++
++	if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
++		return 0;
++
++	if (event->length < 1) {
++		dev_err(&sdev->dev, "unexpected payload size: %u\n",
++			event->length);
++		return 0;
++	}
++
++	if (event->data[0])
++		new = SSAM_BASE_HUB_CONNECTED;
++	else
++		new = SSAM_BASE_HUB_DISCONNECTED;
++
++	ssam_base_hub_update(sdev, new);
++
++	/*
++	 * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
++	 * consumed by the detachment system driver. We're just a (more or less)
++	 * silent observer.
++	 */
++	return 0;
++}
++
++static int ssam_base_hub_resume(struct device *dev)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++	enum ssam_base_hub_state state;
++	int status;
++
++	status = ssam_base_hub_query_state(sdev, &state);
++	if (status)
++		return status;
++
++	return ssam_base_hub_update(sdev, state);
++}
++static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
++
++static int ssam_base_hub_probe(struct ssam_device *sdev)
++{
++	enum ssam_base_hub_state state;
++	struct ssam_base_hub *hub;
++	int status;
++
++	hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
++	if (!hub)
++		return -ENOMEM;
++
++	hub->sdev = sdev;
++	hub->state = SSAM_BASE_HUB_UNINITIALIZED;
++
++	hub->notif.base.priority = 1000;  // this notifier should run first
++	hub->notif.base.fn = ssam_base_hub_notif;
++	hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	hub->notif.event.id.target_category = SSAM_SSH_TC_BAS,
++	hub->notif.event.id.instance = 0,
++	hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	status = ssam_notifier_register(sdev->ctrl, &hub->notif);
++	if (status)
++		return status;
++
++	ssam_device_set_drvdata(sdev, hub);
++
++	status = ssam_base_hub_query_state(sdev, &state);
++	if (status) {
++		ssam_notifier_unregister(sdev->ctrl, &hub->notif);
++		return status;
++	}
++
++	status = ssam_base_hub_update(sdev, state);
++	if (status) {
++		ssam_notifier_unregister(sdev->ctrl, &hub->notif);
++		return status;
++	}
++
++	status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
++	if (status) {
++		ssam_notifier_unregister(sdev->ctrl, &hub->notif);
++		ssam_hub_remove_devices(&sdev->dev);
++	}
++
++	return status;
++}
++
++static void ssam_base_hub_remove(struct ssam_device *sdev)
++{
++	struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
++
++	sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
++
++	ssam_notifier_unregister(sdev->ctrl, &hub->notif);
++	ssam_hub_remove_devices(&sdev->dev);
++}
++
++static const struct ssam_device_id ssam_base_hub_match[] = {
++	{ SSAM_VDEV(HUB, 0x02, 0x00, 0x00) },
++	{ },
++};
++
++static struct ssam_device_driver ssam_base_hub_driver = {
++	.probe = ssam_base_hub_probe,
++	.remove = ssam_base_hub_remove,
++	.match_table = ssam_base_hub_match,
++	.driver = {
++		.name = "surface_aggregator_base_hub",
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++		.pm = &ssam_base_hub_pm_ops,
++	},
++};
++
++
++/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
++
++static const struct acpi_device_id ssam_platform_hub_match[] = {
++	/* Surface Pro 4, 5, and 6 */
++	{ "MSHW0081", (unsigned long)ssam_node_group_sp5 },
++
++	/* Surface Pro 6 (OMBR >= 0x10) */
++	{ "MSHW0111", (unsigned long)ssam_node_group_sp6 },
++
++	/* Surface Pro 7 */
++	{ "MSHW0116", (unsigned long)ssam_node_group_sp7 },
++
++	/* Surface Book 2 */
++	{ "MSHW0107", (unsigned long)ssam_node_group_sb2 },
++
++	/* Surface Book 3 */
++	{ "MSHW0117", (unsigned long)ssam_node_group_sb3 },
++
++	/* Surface Laptop 1 */
++	{ "MSHW0086", (unsigned long)ssam_node_group_sl1 },
++
++	/* Surface Laptop 2 */
++	{ "MSHW0112", (unsigned long)ssam_node_group_sl2 },
++
++	/* Surface Laptop 3 (13", Intel) */
++	{ "MSHW0114", (unsigned long)ssam_node_group_sl3 },
++
++	/* Surface Laptop 3 (15", AMD) */
++	{ "MSHW0110", (unsigned long)ssam_node_group_sl3 },
++
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
++
++static int ssam_platform_hub_probe(struct platform_device *pdev)
++{
++	const struct software_node **nodes;
++	struct ssam_controller *ctrl;
++	struct fwnode_handle *root;
++	int status;
++
++	nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
++	if (!nodes)
++		return -ENODEV;
++
++	/*
++	 * As we're adding the SSAM client devices as children under this device
++	 * and not the SSAM controller, we need to add a device link to the
++	 * controller to ensure that we remove all of our devices before the
++	 * controller is removed. This also guarantees proper ordering for
++	 * suspend/resume of the devices on this hub.
++	 */
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status)
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++
++	status = software_node_register_node_group(nodes);
++	if (status)
++		return status;
++
++	root = software_node_fwnode(&ssam_node_root);
++	if (!root)
++		return -ENOENT;
++
++	set_secondary_fwnode(&pdev->dev, root);
++
++	status = ssam_hub_add_devices(&pdev->dev, ctrl, root);
++	if (status) {
++		software_node_unregister_node_group(nodes);
++		return status;
++	}
++
++	platform_set_drvdata(pdev, nodes);
++	return 0;
++}
++
++static int ssam_platform_hub_remove(struct platform_device *pdev)
++{
++	const struct software_node **nodes = platform_get_drvdata(pdev);
++
++	ssam_hub_remove_devices(&pdev->dev);
++	set_secondary_fwnode(&pdev->dev, NULL);
++	software_node_unregister_node_group(nodes);
++	return 0;
++}
++
++static struct platform_driver ssam_platform_hub_driver = {
++	.probe = ssam_platform_hub_probe,
++	.remove = ssam_platform_hub_remove,
++	.driver = {
++		.name = "surface_aggregator_platform_hub",
++		.acpi_match_table = ssam_platform_hub_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++/* -- Module initialization. ------------------------------------------------ */
++
++static int __init ssam_device_hub_init(void)
++{
++	int status;
++
++	status = platform_driver_register(&ssam_platform_hub_driver);
++	if (status)
++		goto err_platform;
++
++	status = ssam_device_driver_register(&ssam_hub_driver);
++	if (status)
++		goto err_main;
++
++	status = ssam_device_driver_register(&ssam_base_hub_driver);
++	if (status)
++		goto err_base;
++
++	return 0;
++
++err_base:
++	ssam_device_driver_unregister(&ssam_hub_driver);
++err_main:
++	platform_driver_unregister(&ssam_platform_hub_driver);
++err_platform:
++	return status;
++}
++module_init(ssam_device_hub_init);
++
++static void __exit ssam_device_hub_exit(void)
++{
++	ssam_device_driver_unregister(&ssam_base_hub_driver);
++	ssam_device_driver_unregister(&ssam_hub_driver);
++	platform_driver_unregister(&ssam_platform_hub_driver);
++}
++module_exit(ssam_device_hub_exit);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/clients/surface_battery.c b/drivers/misc/surface_aggregator/clients/surface_battery.c
+new file mode 100644
+index 000000000000..21ee212a945a
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/surface_battery.c
+@@ -0,0 +1,1196 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface battery and AC device driver.
++ *
++ * Provides support for battery and AC devices connected via the Surface
++ * System Aggregator Module.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/delay.h>
++#include <linux/jiffies.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/power_supply.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/device.h>
++
++#define SPWR_RETRY			3
++#define spwr_retry(fn, args...)		ssam_retry(fn, SPWR_RETRY, args)
++
++#define SPWR_AC_BAT_UPDATE_DELAY	msecs_to_jiffies(5000)
++
++
++/* -- Module parameters. ---------------------------------------------------- */
++
++static unsigned int cache_time = 1000;
++module_param(cache_time, uint, 0644);
++MODULE_PARM_DESC(cache_time, "battery state chaching time in milliseconds [default: 1000]");
++
++
++/* -- SAM Interface. -------------------------------------------------------- */
++
++enum sam_event_cid_bat {
++	SAM_EVENT_CID_BAT_BIX         = 0x15,
++	SAM_EVENT_CID_BAT_BST         = 0x16,
++	SAM_EVENT_CID_BAT_ADP         = 0x17,
++	SAM_EVENT_CID_BAT_PROT        = 0x18,
++	SAM_EVENT_CID_BAT_DPTF        = 0x53,
++};
++
++enum sam_battery_sta {
++	SAM_BATTERY_STA_OK            = 0x0f,
++	SAM_BATTERY_STA_PRESENT	      = 0x10,
++};
++
++enum sam_battery_state {
++	SAM_BATTERY_STATE_DISCHARGING = BIT(0),
++	SAM_BATTERY_STATE_CHARGING    = BIT(1),
++	SAM_BATTERY_STATE_CRITICAL    = BIT(2),
++};
++
++enum sam_battery_power_unit {
++	SAM_BATTERY_POWER_UNIT_mW     = 0,
++	SAM_BATTERY_POWER_UNIT_mA     = 1,
++};
++
++/* Equivalent to data returned in ACPI _BIX method, revision 0 */
++struct spwr_bix {
++	u8  revision;
++	__le32 power_unit;
++	__le32 design_cap;
++	__le32 last_full_charge_cap;
++	__le32 technology;
++	__le32 design_voltage;
++	__le32 design_cap_warn;
++	__le32 design_cap_low;
++	__le32 cycle_count;
++	__le32 measurement_accuracy;
++	__le32 max_sampling_time;
++	__le32 min_sampling_time;
++	__le32 max_avg_interval;
++	__le32 min_avg_interval;
++	__le32 bat_cap_granularity_1;
++	__le32 bat_cap_granularity_2;
++	__u8 model[21];
++	__u8 serial[11];
++	__u8 type[5];
++	__u8 oem_info[21];
++} __packed;
++
++static_assert(sizeof(struct spwr_bix) == 119);
++
++#define SPWR_BIX_REVISION		0
++
++/* Equivalent to data returned in ACPI _BST method */
++struct spwr_bst {
++	__le32 state;
++	__le32 present_rate;
++	__le32 remaining_cap;
++	__le32 present_voltage;
++} __packed;
++
++static_assert(sizeof(struct spwr_bst) == 16);
++
++#define SPWR_BATTERY_VALUE_UNKNOWN	0xffffffff
++
++/* Get battery status (_STA) */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x01,
++});
++
++/* Get battery static information (_BIX) */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_bix, struct spwr_bix, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x02,
++});
++
++/* Get battery dynamic information (_BST) */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_bst, struct spwr_bst, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x03,
++});
++
++/* Set battery trip point (_BTP) */
++static SSAM_DEFINE_SYNC_REQUEST_CL_W(ssam_bat_set_btp, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x04,
++});
++
++/* Get platform power source for battery (DPTF PSRC) */
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_psrc, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x0d,
++});
++
++/*
++ * The following requests are currently unused. They are nevertheless included
++ * for documentation of the SAM interface.
++ */
++
++/* Get maximum platform power for battery (DPTF PMAX) */
++__always_unused
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_pmax, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x0b,
++});
++
++/* Get adapter rating (DPTF ARTG) */
++__always_unused
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_artg, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x0f,
++});
++
++/* Unknown (DPTF PSOC) */
++__always_unused
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_psoc, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x0c,
++});
++
++/* Unknown (DPTF CHGI/ INT3403 SPPC) */
++__always_unused
++static SSAM_DEFINE_SYNC_REQUEST_CL_W(ssam_bat_set_chgi, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id      = 0x0e,
++});
++
++
++/* -- Common power-subsystem interface. ------------------------------------- */
++
++struct spwr_psy_properties {
++	const char *name;
++	struct ssam_event_registry registry;
++};
++
++struct spwr_battery_device {
++	struct ssam_device *sdev;
++
++	char name[32];
++	struct power_supply *psy;
++	struct power_supply_desc psy_desc;
++
++	struct delayed_work update_work;
++
++	struct ssam_event_notifier notif;
++
++	struct mutex lock;
++	unsigned long timestamp;
++
++	__le32 sta;
++	struct spwr_bix bix;
++	struct spwr_bst bst;
++	u32 alarm;
++};
++
++struct spwr_ac_device {
++	struct ssam_device *sdev;
++
++	char name[32];
++	struct power_supply *psy;
++	struct power_supply_desc psy_desc;
++
++	struct ssam_event_notifier notif;
++
++	struct mutex lock;
++
++	__le32 state;
++};
++
++static enum power_supply_property spwr_ac_props[] = {
++	POWER_SUPPLY_PROP_ONLINE,
++};
++
++static enum power_supply_property spwr_battery_props_chg[] = {
++	POWER_SUPPLY_PROP_STATUS,
++	POWER_SUPPLY_PROP_PRESENT,
++	POWER_SUPPLY_PROP_TECHNOLOGY,
++	POWER_SUPPLY_PROP_CYCLE_COUNT,
++	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
++	POWER_SUPPLY_PROP_VOLTAGE_NOW,
++	POWER_SUPPLY_PROP_CURRENT_NOW,
++	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
++	POWER_SUPPLY_PROP_CHARGE_FULL,
++	POWER_SUPPLY_PROP_CHARGE_NOW,
++	POWER_SUPPLY_PROP_CAPACITY,
++	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
++	POWER_SUPPLY_PROP_MODEL_NAME,
++	POWER_SUPPLY_PROP_MANUFACTURER,
++	POWER_SUPPLY_PROP_SERIAL_NUMBER,
++};
++
++static enum power_supply_property spwr_battery_props_eng[] = {
++	POWER_SUPPLY_PROP_STATUS,
++	POWER_SUPPLY_PROP_PRESENT,
++	POWER_SUPPLY_PROP_TECHNOLOGY,
++	POWER_SUPPLY_PROP_CYCLE_COUNT,
++	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
++	POWER_SUPPLY_PROP_VOLTAGE_NOW,
++	POWER_SUPPLY_PROP_POWER_NOW,
++	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
++	POWER_SUPPLY_PROP_ENERGY_FULL,
++	POWER_SUPPLY_PROP_ENERGY_NOW,
++	POWER_SUPPLY_PROP_CAPACITY,
++	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
++	POWER_SUPPLY_PROP_MODEL_NAME,
++	POWER_SUPPLY_PROP_MANUFACTURER,
++	POWER_SUPPLY_PROP_SERIAL_NUMBER,
++};
++
++static bool spwr_battery_present(struct spwr_battery_device *bat)
++{
++	return le32_to_cpu(bat->sta) & SAM_BATTERY_STA_PRESENT;
++}
++
++static int spwr_battery_load_sta(struct spwr_battery_device *bat)
++{
++	return spwr_retry(ssam_bat_get_sta, bat->sdev, &bat->sta);
++}
++
++static int spwr_battery_load_bix(struct spwr_battery_device *bat)
++{
++	int status;
++
++	if (!spwr_battery_present(bat))
++		return 0;
++
++	status = spwr_retry(ssam_bat_get_bix, bat->sdev, &bat->bix);
++
++	// enforce NULL terminated strings in case anything goes wrong...
++	bat->bix.model[ARRAY_SIZE(bat->bix.model) - 1] = 0;
++	bat->bix.serial[ARRAY_SIZE(bat->bix.serial) - 1] = 0;
++	bat->bix.type[ARRAY_SIZE(bat->bix.type) - 1] = 0;
++	bat->bix.oem_info[ARRAY_SIZE(bat->bix.oem_info) - 1] = 0;
++
++	return status;
++}
++
++static int spwr_battery_load_bst(struct spwr_battery_device *bat)
++{
++	if (!spwr_battery_present(bat))
++		return 0;
++
++	return spwr_retry(ssam_bat_get_bst, bat->sdev, &bat->bst);
++}
++
++static int spwr_battery_set_alarm_unlocked(struct spwr_battery_device *bat,
++					   u32 value)
++{
++	__le32 value_le = cpu_to_le32(value);
++
++	bat->alarm = value;
++	return spwr_retry(ssam_bat_set_btp, bat->sdev, &value_le);
++}
++
++static int spwr_battery_set_alarm(struct spwr_battery_device *bat, u32 value)
++{
++	int status;
++
++	mutex_lock(&bat->lock);
++	status = spwr_battery_set_alarm_unlocked(bat, value);
++	mutex_unlock(&bat->lock);
++
++	return status;
++}
++
++static int spwr_battery_update_bst_unlocked(struct spwr_battery_device *bat,
++					    bool cached)
++{
++	unsigned long cache_deadline;
++	int status;
++
++	cache_deadline = bat->timestamp + msecs_to_jiffies(cache_time);
++	if (cached && bat->timestamp && time_is_after_jiffies(cache_deadline))
++		return 0;
++
++	status = spwr_battery_load_sta(bat);
++	if (status)
++		return status;
++
++	status = spwr_battery_load_bst(bat);
++	if (status)
++		return status;
++
++	bat->timestamp = jiffies;
++	return 0;
++}
++
++static int spwr_battery_update_bst(struct spwr_battery_device *bat, bool cached)
++{
++	int status;
++
++	mutex_lock(&bat->lock);
++	status = spwr_battery_update_bst_unlocked(bat, cached);
++	mutex_unlock(&bat->lock);
++
++	return status;
++}
++
++static int spwr_battery_update_bix_unlocked(struct spwr_battery_device *bat)
++{
++	int status;
++
++	status = spwr_battery_load_sta(bat);
++	if (status)
++		return status;
++
++	status = spwr_battery_load_bix(bat);
++	if (status)
++		return status;
++
++	status = spwr_battery_load_bst(bat);
++	if (status)
++		return status;
++
++	if (bat->bix.revision != SPWR_BIX_REVISION) {
++		dev_warn(&bat->sdev->dev, "unsupported battery revision: %u\n",
++			 bat->bix.revision);
++	}
++
++	bat->timestamp = jiffies;
++	return 0;
++}
++
++static int spwr_ac_update_unlocked(struct spwr_ac_device *ac)
++{
++	int status;
++	u32 old = ac->state;
++
++	status = spwr_retry(ssam_bat_get_psrc, ac->sdev, &ac->state);
++	if (status < 0)
++		return status;
++
++	return old != ac->state;
++}
++
++static int spwr_ac_update(struct spwr_ac_device *ac)
++{
++	int status;
++
++	mutex_lock(&ac->lock);
++	status = spwr_ac_update_unlocked(ac);
++	mutex_unlock(&ac->lock);
++
++	return status;
++}
++
++static u32 sprw_battery_get_full_cap_safe(struct spwr_battery_device *bat)
++{
++	u32 full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
++
++	if (full_cap == 0 || full_cap == SPWR_BATTERY_VALUE_UNKNOWN)
++		full_cap = get_unaligned_le32(&bat->bix.design_cap);
++
++	return full_cap;
++}
++
++static bool spwr_battery_is_full(struct spwr_battery_device *bat)
++{
++	u32 state = get_unaligned_le32(&bat->bst.state);
++	u32 full_cap = sprw_battery_get_full_cap_safe(bat);
++	u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
++
++	return full_cap != SPWR_BATTERY_VALUE_UNKNOWN && full_cap != 0
++		&& remaining_cap != SPWR_BATTERY_VALUE_UNKNOWN
++		&& remaining_cap >= full_cap
++		&& state == 0;
++}
++
++static int spwr_battery_recheck_full(struct spwr_battery_device *bat)
++{
++	bool present;
++	u32 unit;
++	int status;
++
++	mutex_lock(&bat->lock);
++	unit = get_unaligned_le32(&bat->bix.power_unit);
++	present = spwr_battery_present(bat);
++
++	status = spwr_battery_update_bix_unlocked(bat);
++	if (status)
++		goto out;
++
++	// if battery has been attached, (re-)initialize alarm
++	if (!present && spwr_battery_present(bat)) {
++		u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
++
++		status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
++		if (status)
++			goto out;
++	}
++
++	/*
++	 * Warn if the unit has changed. This is something we genuinely don't
++	 * expect to happen, so make this a big warning. If it does, we'll
++	 * need to add support for it.
++	 */
++	WARN_ON(unit != get_unaligned_le32(&bat->bix.power_unit));
++
++out:
++	mutex_unlock(&bat->lock);
++
++	if (!status)
++		power_supply_changed(bat->psy);
++
++	return status;
++}
++
++static int spwr_battery_recheck_status(struct spwr_battery_device *bat)
++{
++	int status;
++
++	status = spwr_battery_update_bst(bat, false);
++	if (!status)
++		power_supply_changed(bat->psy);
++
++	return status;
++}
++
++static int spwr_battery_recheck_adapter(struct spwr_battery_device *bat)
++{
++	u32 full_cap = sprw_battery_get_full_cap_safe(bat);
++	u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
++
++	if (full_cap == 0 || full_cap == SPWR_BATTERY_VALUE_UNKNOWN)
++		return 0;
++
++	if (remaining_cap == SPWR_BATTERY_VALUE_UNKNOWN)
++		return 0;
++
++	/*
++	 * Handle battery update quirk:
++	 * When the battery is fully charged and the adapter is plugged in or
++	 * removed, the EC does not send a separate event for the state
++	 * (charging/discharging) change. Furthermore it may take some time until
++	 * the state is updated on the battery. Schedule an update to solve this.
++	 */
++
++	if (remaining_cap >= full_cap)
++		schedule_delayed_work(&bat->update_work, SPWR_AC_BAT_UPDATE_DELAY);
++
++	return 0;
++}
++
++static int spwr_ac_recheck(struct spwr_ac_device *ac)
++{
++	int status;
++
++	status = spwr_ac_update(ac);
++	if (status > 0)
++		power_supply_changed(ac->psy);
++
++	return status >= 0 ? 0 : status;
++}
++
++static u32 spwr_notify_bat(struct ssam_event_notifier *nf,
++			   const struct ssam_event *event)
++{
++	struct spwr_battery_device *bat;
++	int status;
++
++	bat = container_of(nf, struct spwr_battery_device, notif);
++
++	dev_dbg(&bat->sdev->dev, "power event (cid = 0x%02x, iid = %d, tid = %d)\n",
++		event->command_id, event->instance_id, event->target_id);
++
++	// handled here, needs to be handled for all targets/instances
++	if (event->command_id == SAM_EVENT_CID_BAT_ADP) {
++		status = spwr_battery_recheck_adapter(bat);
++		return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++	}
++
++	if (bat->sdev->uid.target != event->target_id)
++		return 0;
++
++	if (bat->sdev->uid.instance != event->instance_id)
++		return 0;
++
++	switch (event->command_id) {
++	case SAM_EVENT_CID_BAT_BIX:
++		status = spwr_battery_recheck_full(bat);
++		break;
++
++	case SAM_EVENT_CID_BAT_BST:
++		status = spwr_battery_recheck_status(bat);
++		break;
++
++	case SAM_EVENT_CID_BAT_PROT:
++		/*
++		 * TODO: Implement support for battery protection status change
++		 *       event.
++		 */
++		status = 0;
++		break;
++
++	case SAM_EVENT_CID_BAT_DPTF:
++		/*
++		 * TODO: Implement support for DPTF event.
++		 */
++		status = 0;
++		break;
++
++	default:
++		return 0;
++	}
++
++	return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++}
++
++static u32 spwr_notify_ac(struct ssam_event_notifier *nf,
++			  const struct ssam_event *event)
++{
++	struct spwr_ac_device *ac;
++	int status;
++
++	ac = container_of(nf, struct spwr_ac_device, notif);
++
++	dev_dbg(&ac->sdev->dev, "power event (cid = 0x%02x, iid = %d, tid = %d)\n",
++		event->command_id, event->instance_id, event->target_id);
++
++	/*
++	 * Allow events of all targets/instances here. Global adapter status
++	 * seems to be handled via target=1 and instance=1, but events are
++	 * reported on all targets/instances in use.
++	 *
++	 * While it should be enough to just listen on 1/1, listen everywhere to
++	 * make sure we don't miss anything.
++	 */
++
++	switch (event->command_id) {
++	case SAM_EVENT_CID_BAT_ADP:
++		status = spwr_ac_recheck(ac);
++		return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++
++	default:
++		return 0;
++	}
++}
++
++static void spwr_battery_update_bst_workfn(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct spwr_battery_device *bat;
++	int status;
++
++	bat = container_of(dwork, struct spwr_battery_device, update_work);
++
++	status = spwr_battery_update_bst(bat, false);
++	if (!status)
++		power_supply_changed(bat->psy);
++
++	if (status) {
++		dev_err(&bat->sdev->dev, "failed to update battery state: %d\n",
++			status);
++	}
++}
++
++static int spwr_battery_prop_status(struct spwr_battery_device *bat)
++{
++	u32 state = get_unaligned_le32(&bat->bst.state);
++	u32 present_rate = get_unaligned_le32(&bat->bst.present_rate);
++
++	if (state & SAM_BATTERY_STATE_DISCHARGING)
++		return POWER_SUPPLY_STATUS_DISCHARGING;
++
++	if (state & SAM_BATTERY_STATE_CHARGING)
++		return POWER_SUPPLY_STATUS_CHARGING;
++
++	if (spwr_battery_is_full(bat))
++		return POWER_SUPPLY_STATUS_FULL;
++
++	if (present_rate == 0)
++		return POWER_SUPPLY_STATUS_NOT_CHARGING;
++
++	return POWER_SUPPLY_STATUS_UNKNOWN;
++}
++
++static int spwr_battery_prop_technology(struct spwr_battery_device *bat)
++{
++	if (!strcasecmp("NiCd", bat->bix.type))
++		return POWER_SUPPLY_TECHNOLOGY_NiCd;
++
++	if (!strcasecmp("NiMH", bat->bix.type))
++		return POWER_SUPPLY_TECHNOLOGY_NiMH;
++
++	if (!strcasecmp("LION", bat->bix.type))
++		return POWER_SUPPLY_TECHNOLOGY_LION;
++
++	if (!strncasecmp("LI-ION", bat->bix.type, 6))
++		return POWER_SUPPLY_TECHNOLOGY_LION;
++
++	if (!strcasecmp("LiP", bat->bix.type))
++		return POWER_SUPPLY_TECHNOLOGY_LIPO;
++
++	return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
++}
++
++static int spwr_battery_prop_capacity(struct spwr_battery_device *bat)
++{
++	u32 full_cap = sprw_battery_get_full_cap_safe(bat);
++	u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
++
++	if (full_cap == 0 || full_cap == SPWR_BATTERY_VALUE_UNKNOWN)
++		return -ENODEV;
++
++	if (remaining_cap == SPWR_BATTERY_VALUE_UNKNOWN)
++		return -ENODEV;
++
++	return remaining_cap * 100 / full_cap;
++}
++
++static int spwr_battery_prop_capacity_level(struct spwr_battery_device *bat)
++{
++	u32 state = get_unaligned_le32(&bat->bst.state);
++	u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
++
++	if (state & SAM_BATTERY_STATE_CRITICAL)
++		return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
++
++	if (spwr_battery_is_full(bat))
++		return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
++
++	if (remaining_cap <= bat->alarm)
++		return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
++
++	return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
++}
++
++static int spwr_ac_get_property(struct power_supply *psy,
++				enum power_supply_property psp,
++				union power_supply_propval *val)
++{
++	struct spwr_ac_device *ac = power_supply_get_drvdata(psy);
++	int status;
++
++	mutex_lock(&ac->lock);
++
++	status = spwr_ac_update_unlocked(ac);
++	if (status)
++		goto out;
++
++	switch (psp) {
++	case POWER_SUPPLY_PROP_ONLINE:
++		val->intval = !!le32_to_cpu(ac->state);
++		break;
++
++	default:
++		status = -EINVAL;
++		goto out;
++	}
++
++out:
++	mutex_unlock(&ac->lock);
++	return status;
++}
++
++static int spwr_battery_get_property(struct power_supply *psy,
++				     enum power_supply_property psp,
++				     union power_supply_propval *val)
++{
++	struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
++	u32 value;
++	int status;
++
++	mutex_lock(&bat->lock);
++
++	status = spwr_battery_update_bst_unlocked(bat, true);
++	if (status)
++		goto out;
++
++	// abort if battery is not present
++	if (!spwr_battery_present(bat) && psp != POWER_SUPPLY_PROP_PRESENT) {
++		status = -ENODEV;
++		goto out;
++	}
++
++	switch (psp) {
++	case POWER_SUPPLY_PROP_STATUS:
++		val->intval = spwr_battery_prop_status(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_PRESENT:
++		val->intval = spwr_battery_present(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_TECHNOLOGY:
++		val->intval = spwr_battery_prop_technology(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_CYCLE_COUNT:
++		value = get_unaligned_le32(&bat->bix.cycle_count);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
++		value = get_unaligned_le32(&bat->bix.design_voltage);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
++		value = get_unaligned_le32(&bat->bst.present_voltage);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CURRENT_NOW:
++	case POWER_SUPPLY_PROP_POWER_NOW:
++		value = get_unaligned_le32(&bat->bst.present_rate);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
++	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
++		value = get_unaligned_le32(&bat->bix.design_cap);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CHARGE_FULL:
++	case POWER_SUPPLY_PROP_ENERGY_FULL:
++		value = get_unaligned_le32(&bat->bix.last_full_charge_cap);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CHARGE_NOW:
++	case POWER_SUPPLY_PROP_ENERGY_NOW:
++		value = get_unaligned_le32(&bat->bst.remaining_cap);
++		if (value != SPWR_BATTERY_VALUE_UNKNOWN)
++			val->intval = value * 1000;
++		else
++			status = -ENODEV;
++		break;
++
++	case POWER_SUPPLY_PROP_CAPACITY:
++		val->intval = spwr_battery_prop_capacity(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
++		val->intval = spwr_battery_prop_capacity_level(bat);
++		break;
++
++	case POWER_SUPPLY_PROP_MODEL_NAME:
++		val->strval = bat->bix.model;
++		break;
++
++	case POWER_SUPPLY_PROP_MANUFACTURER:
++		val->strval = bat->bix.oem_info;
++		break;
++
++	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
++		val->strval = bat->bix.serial;
++		break;
++
++	default:
++		status = -EINVAL;
++		break;
++	}
++
++out:
++	mutex_unlock(&bat->lock);
++	return status;
++}
++
++
++static ssize_t spwr_battery_alarm_show(struct device *dev,
++				       struct device_attribute *attr,
++				       char *buf)
++{
++	struct power_supply *psy = dev_get_drvdata(dev);
++	struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
++
++	return sprintf(buf, "%d\n", bat->alarm * 1000);
++}
++
++static ssize_t spwr_battery_alarm_store(struct device *dev,
++					struct device_attribute *attr,
++					const char *buf, size_t count)
++{
++	struct power_supply *psy = dev_get_drvdata(dev);
++	struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
++	unsigned long value;
++	int status;
++
++	status = kstrtoul(buf, 0, &value);
++	if (status)
++		return status;
++
++	if (!spwr_battery_present(bat))
++		return -ENODEV;
++
++	status = spwr_battery_set_alarm(bat, value / 1000);
++	if (status)
++		return status;
++
++	return count;
++}
++
++static const struct device_attribute alarm_attr = {
++	.attr = {.name = "alarm", .mode = 0644},
++	.show = spwr_battery_alarm_show,
++	.store = spwr_battery_alarm_store,
++};
++
++
++static void spwr_ac_init(struct spwr_ac_device *ac,
++			    struct ssam_device *sdev,
++			    struct ssam_event_registry registry,
++			    const char *name)
++{
++	mutex_init(&ac->lock);
++	strncpy(ac->name, name, ARRAY_SIZE(ac->name) - 1);
++
++	ac->sdev = sdev;
++
++	ac->notif.base.priority = 1;
++	ac->notif.base.fn = spwr_notify_ac;
++	ac->notif.event.reg = registry;
++	ac->notif.event.id.target_category = sdev->uid.category;
++	ac->notif.event.id.instance = 0;
++	ac->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	ac->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	ac->psy_desc.name = ac->name;
++	ac->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
++	ac->psy_desc.properties = spwr_ac_props;
++	ac->psy_desc.num_properties = ARRAY_SIZE(spwr_ac_props);
++	ac->psy_desc.get_property = spwr_ac_get_property;
++}
++
++static void spwr_ac_destroy(struct spwr_ac_device *ac)
++{
++	mutex_destroy(&ac->lock);
++}
++
++static int spwr_ac_register(struct spwr_ac_device *ac)
++{
++	struct power_supply_config psy_cfg = {};
++	__le32 sta;
++	int status;
++
++	// make sure the device is there and functioning properly
++	status = spwr_retry(ssam_bat_get_sta, ac->sdev, &sta);
++	if (status)
++		return status;
++
++	if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
++		return -ENODEV;
++
++	psy_cfg.drv_data = ac;
++	ac->psy = power_supply_register(&ac->sdev->dev, &ac->psy_desc, &psy_cfg);
++	if (IS_ERR(ac->psy))
++		return PTR_ERR(ac->psy);
++
++	status = ssam_notifier_register(ac->sdev->ctrl, &ac->notif);
++	if (status)
++		power_supply_unregister(ac->psy);
++
++	return status;
++}
++
++static int spwr_ac_unregister(struct spwr_ac_device *ac)
++{
++	ssam_notifier_unregister(ac->sdev->ctrl, &ac->notif);
++	power_supply_unregister(ac->psy);
++	return 0;
++}
++
++static void spwr_battery_init(struct spwr_battery_device *bat,
++			      struct ssam_device *sdev,
++			      struct ssam_event_registry registry,
++			      const char *name)
++{
++	mutex_init(&bat->lock);
++	strncpy(bat->name, name, ARRAY_SIZE(bat->name) - 1);
++
++	bat->sdev = sdev;
++
++	bat->notif.base.priority = 1;
++	bat->notif.base.fn = spwr_notify_bat;
++	bat->notif.event.reg = registry;
++	bat->notif.event.id.target_category = sdev->uid.category;
++	bat->notif.event.id.instance = 0;
++	bat->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	bat->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	bat->psy_desc.name = bat->name;
++	bat->psy_desc.type = POWER_SUPPLY_TYPE_BATTERY;
++	bat->psy_desc.get_property = spwr_battery_get_property;
++
++	INIT_DELAYED_WORK(&bat->update_work, spwr_battery_update_bst_workfn);
++}
++
++static void spwr_battery_destroy(struct spwr_battery_device *bat)
++{
++	mutex_destroy(&bat->lock);
++}
++
++static int spwr_battery_register(struct spwr_battery_device *bat)
++{
++	struct power_supply_config psy_cfg = {};
++	__le32 sta;
++	int status;
++
++	// make sure the device is there and functioning properly
++	status = spwr_retry(ssam_bat_get_sta, bat->sdev, &sta);
++	if (status)
++		return status;
++
++	if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
++		return -ENODEV;
++
++	status = spwr_battery_update_bix_unlocked(bat);
++	if (status)
++		return status;
++
++	if (spwr_battery_present(bat)) {
++		u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
++
++		status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
++		if (status)
++			return status;
++	}
++
++	switch (get_unaligned_le32(&bat->bix.power_unit)) {
++	case SAM_BATTERY_POWER_UNIT_mW:
++		bat->psy_desc.properties = spwr_battery_props_eng;
++		bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_eng);
++		break;
++
++	case SAM_BATTERY_POWER_UNIT_mA:
++		bat->psy_desc.properties = spwr_battery_props_chg;
++		bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_chg);
++		break;
++
++	default:
++		dev_err(&bat->sdev->dev, "unsupported battery power unit: %u\n",
++			get_unaligned_le32(&bat->bix.power_unit));
++		return -ENOTSUPP;
++	}
++
++	psy_cfg.drv_data = bat;
++	bat->psy = power_supply_register(&bat->sdev->dev, &bat->psy_desc, &psy_cfg);
++	if (IS_ERR(bat->psy))
++		return PTR_ERR(bat->psy);
++
++	status = ssam_notifier_register(bat->sdev->ctrl, &bat->notif);
++	if (status)
++		goto err_notif;
++
++	status = device_create_file(&bat->psy->dev, &alarm_attr);
++	if (status)
++		goto err_file;
++
++	return 0;
++
++err_file:
++	ssam_notifier_unregister(bat->sdev->ctrl, &bat->notif);
++err_notif:
++	power_supply_unregister(bat->psy);
++	return status;
++}
++
++static void spwr_battery_unregister(struct spwr_battery_device *bat)
++{
++	ssam_notifier_unregister(bat->sdev->ctrl, &bat->notif);
++	cancel_delayed_work_sync(&bat->update_work);
++	device_remove_file(&bat->psy->dev, &alarm_attr);
++	power_supply_unregister(bat->psy);
++	mutex_destroy(&bat->lock);
++}
++
++
++/* -- Power management. ----------------------------------------------------- */
++
++#ifdef CONFIG_PM_SLEEP
++
++static int surface_battery_resume(struct device *dev)
++{
++	return spwr_battery_recheck_full(dev_get_drvdata(dev));
++}
++
++static int surface_ac_resume(struct device *dev)
++{
++	return spwr_ac_recheck(dev_get_drvdata(dev));
++}
++
++#else /* CONFIG_PM_SLEEP */
++
++#define surface_battery_resume	NULL
++#define surface_ac_resume	NULL
++
++#endif /* CONFIG_PM_SLEEP */
++
++SIMPLE_DEV_PM_OPS(surface_battery_pm_ops, NULL, surface_battery_resume);
++SIMPLE_DEV_PM_OPS(surface_ac_pm_ops, NULL, surface_ac_resume);
++
++
++/* -- Battery driver. ------------------------------------------------------- */
++
++static int surface_battery_probe(struct ssam_device *sdev)
++{
++	const struct spwr_psy_properties *p;
++	struct spwr_battery_device *bat;
++	int status;
++
++	p = ssam_device_get_match_data(sdev);
++	if (!p)
++		return -ENODEV;
++
++	bat = devm_kzalloc(&sdev->dev, sizeof(*bat), GFP_KERNEL);
++	if (!bat)
++		return -ENOMEM;
++
++	spwr_battery_init(bat, sdev, p->registry, p->name);
++	ssam_device_set_drvdata(sdev, bat);
++
++	status = spwr_battery_register(bat);
++	if (status)
++		spwr_battery_destroy(bat);
++
++	return status;
++}
++
++static void surface_battery_remove(struct ssam_device *sdev)
++{
++	struct spwr_battery_device *bat = ssam_device_get_drvdata(sdev);
++
++	spwr_battery_unregister(bat);
++	spwr_battery_destroy(bat);
++}
++
++static const struct spwr_psy_properties spwr_psy_props_bat1 = {
++	.name = "BAT1",
++	.registry = SSAM_EVENT_REGISTRY_SAM,
++};
++
++static const struct spwr_psy_properties spwr_psy_props_bat2_sb3 = {
++	.name = "BAT2",
++	.registry = SSAM_EVENT_REGISTRY_KIP,
++};
++
++static const struct ssam_device_id surface_battery_match[] = {
++	{ SSAM_SDEV(BAT, 0x01, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat1     },
++	{ SSAM_SDEV(BAT, 0x02, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat2_sb3 },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, surface_battery_match);
++
++static struct ssam_device_driver surface_battery_driver = {
++	.probe = surface_battery_probe,
++	.remove = surface_battery_remove,
++	.match_table = surface_battery_match,
++	.driver = {
++		.name = "surface_battery",
++		.pm = &surface_battery_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++/* -- AC driver. ------------------------------------------------------------ */
++
++static int surface_ac_probe(struct ssam_device *sdev)
++{
++	const struct spwr_psy_properties *p;
++	struct spwr_ac_device *ac;
++	int status;
++
++	p = ssam_device_get_match_data(sdev);
++	if (!p)
++		return -ENODEV;
++
++	ac = devm_kzalloc(&sdev->dev, sizeof(*ac), GFP_KERNEL);
++	if (!ac)
++		return -ENOMEM;
++
++	spwr_ac_init(ac, sdev, p->registry, p->name);
++	ssam_device_set_drvdata(sdev, ac);
++
++	status = spwr_ac_register(ac);
++	if (status)
++		spwr_ac_destroy(ac);
++
++	return status;
++}
++
++static void surface_ac_remove(struct ssam_device *sdev)
++{
++	struct spwr_ac_device *ac = ssam_device_get_drvdata(sdev);
++
++	spwr_ac_unregister(ac);
++	spwr_ac_destroy(ac);
++}
++
++static const struct spwr_psy_properties spwr_psy_props_adp1 = {
++	.name = "ADP1",
++	.registry = SSAM_EVENT_REGISTRY_SAM,
++};
++
++static const struct ssam_device_id surface_ac_match[] = {
++	{ SSAM_SDEV(BAT, 0x01, 0x01, 0x01), (unsigned long)&spwr_psy_props_adp1 },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, surface_ac_match);
++
++static struct ssam_device_driver surface_ac_driver = {
++	.probe = surface_ac_probe,
++	.remove = surface_ac_remove,
++	.match_table = surface_ac_match,
++	.driver = {
++		.name = "surface_ac",
++		.pm = &surface_ac_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++/* -- Module setup. --------------------------------------------------------- */
++
++static int __init surface_battery_init(void)
++{
++	int status;
++
++	status = ssam_device_driver_register(&surface_battery_driver);
++	if (status)
++		return status;
++
++	status = ssam_device_driver_register(&surface_ac_driver);
++	if (status)
++		ssam_device_driver_unregister(&surface_battery_driver);
++
++	return status;
++}
++module_init(surface_battery_init);
++
++static void __exit surface_battery_exit(void)
++{
++	ssam_device_driver_unregister(&surface_battery_driver);
++	ssam_device_driver_unregister(&surface_ac_driver);
++}
++module_exit(surface_battery_exit);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Battery/AC driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/clients/surface_dtx.c b/drivers/misc/surface_aggregator/clients/surface_dtx.c
+new file mode 100644
+index 000000000000..1ac1208edd13
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/surface_dtx.c
+@@ -0,0 +1,1270 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface Book (gen. 2 and later) detachment system (DTX) driver.
++ *
++ * Provides a user-space interface to properly handle clipboard/tablet
++ * (containing screen and processor) detachment from the base of the device
++ * (containing the keyboard and optionally a discrete GPU). Allows to
++ * acknowledge (to speed things up), abort (e.g. in case the dGPU is stil in
++ * use), or request detachment via user-space.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/input.h>
++#include <linux/ioctl.h>
++#include <linux/kernel.h>
++#include <linux/kfifo.h>
++#include <linux/kref.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/rwsem.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
++#include <linux/surface_aggregator/dtx.h>
++
++
++/* -- SSAM interface. ------------------------------------------------------- */
++
++enum sam_event_cid_bas {
++	SAM_EVENT_CID_DTX_CONNECTION			= 0x0c,
++	SAM_EVENT_CID_DTX_REQUEST			= 0x0e,
++	SAM_EVENT_CID_DTX_CANCEL			= 0x0f,
++	SAM_EVENT_CID_DTX_LATCH_STATUS			= 0x11,
++};
++
++enum ssam_bas_base_state {
++	SSAM_BAS_BASE_STATE_DETACH_SUCCESS		= 0x00,
++	SSAM_BAS_BASE_STATE_ATTACHED			= 0x01,
++	SSAM_BAS_BASE_STATE_NOT_FEASIBLE		= 0x02,
++};
++
++enum ssam_bas_latch_status {
++	SSAM_BAS_LATCH_STATUS_CLOSED			= 0x00,
++	SSAM_BAS_LATCH_STATUS_OPENED			= 0x01,
++	SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN		= 0x02,
++	SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN	= 0x03,
++	SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE		= 0x04,
++};
++
++enum ssam_bas_cancel_reason {
++	SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE		= 0x00,  // low battery
++	SSAM_BAS_CANCEL_REASON_TIMEOUT			= 0x02,
++	SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN		= 0x03,
++	SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN	= 0x04,
++	SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE		= 0x05,
++};
++
++struct ssam_bas_base_info {
++	u8 state;
++	u8 base_id;
++} __packed;
++
++static_assert(sizeof(struct ssam_bas_base_info) == 2);
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x06,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x07,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x08,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x09,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0a,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0b,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0c,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x0d,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x11,
++	.instance_id     = 0x00,
++});
++
++
++/* -- Main structures. ------------------------------------------------------ */
++
++enum sdtx_device_state {
++	SDTX_DEVICE_SHUTDOWN_BIT    = BIT(0),
++	SDTX_DEVICE_DIRTY_BASE_BIT  = BIT(1),
++	SDTX_DEVICE_DIRTY_MODE_BIT  = BIT(2),
++	SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
++};
++
++struct sdtx_device {
++	struct kref kref;
++	struct rw_semaphore lock;
++
++	struct device *dev;
++	struct ssam_controller *ctrl;
++	unsigned long flags;
++
++	struct miscdevice mdev;
++	wait_queue_head_t waitq;
++	struct mutex write_lock;
++	struct rw_semaphore client_lock;
++	struct list_head client_list;
++
++	struct delayed_work state_work;
++	struct {
++		struct ssam_bas_base_info base;
++		u8 device_mode;
++		u8 latch_status;
++	} state;
++
++	struct delayed_work mode_work;
++	struct input_dev *mode_switch;
++
++	struct ssam_event_notifier notif;
++};
++
++enum sdtx_client_state {
++	SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
++};
++
++struct sdtx_client {
++	struct sdtx_device *ddev;
++	struct list_head node;
++	unsigned long flags;
++
++	struct fasync_struct *fasync;
++
++	struct mutex read_lock;
++	DECLARE_KFIFO(buffer, u8, 512);
++};
++
++static void __sdtx_device_release(struct kref *kref)
++{
++	kfree(container_of(kref, struct sdtx_device, kref));
++}
++
++static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
++{
++	if (ddev)
++		kref_get(&ddev->kref);
++
++	return ddev;
++}
++
++static void sdtx_device_put(struct sdtx_device *ddev)
++{
++	if (ddev)
++		kref_put(&ddev->kref, __sdtx_device_release);
++}
++
++
++/* -- Firmware value translations. ------------------------------------------ */
++
++static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
++{
++	switch (state) {
++	case SSAM_BAS_BASE_STATE_ATTACHED:
++		return SDTX_BASE_ATTACHED;
++
++	case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
++		return SDTX_BASE_DETACHED;
++
++	case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
++		return SDTX_DETACH_NOT_FEASIBLE;
++
++	default:
++		dev_err(ddev->dev, "unknown base state: 0x%02x\n", state);
++		return SDTX_UNKNOWN(state);
++	}
++}
++
++static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
++{
++	switch (status) {
++	case SSAM_BAS_LATCH_STATUS_CLOSED:
++		return SDTX_LATCH_CLOSED;
++
++	case SSAM_BAS_LATCH_STATUS_OPENED:
++		return SDTX_LATCH_OPENED;
++
++	case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
++		return SDTX_ERR_FAILED_TO_OPEN;
++
++	case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
++		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
++
++	case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
++		return SDTX_ERR_FAILED_TO_CLOSE;
++
++	default:
++		dev_err(ddev->dev, "unknown latch status: 0x%02x\n", status);
++		return SDTX_UNKNOWN(status);
++	}
++}
++
++static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
++{
++	switch (reason) {
++	case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
++		return SDTX_DETACH_NOT_FEASIBLE;
++
++	case SSAM_BAS_CANCEL_REASON_TIMEOUT:
++		return SDTX_DETACH_TIMEDOUT;
++
++	case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
++		return SDTX_ERR_FAILED_TO_OPEN;
++
++	case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
++		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
++
++	case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
++		return SDTX_ERR_FAILED_TO_CLOSE;
++
++	default:
++		dev_err(ddev->dev, "unknown cancel reason: 0x%02x\n", reason);
++		return SDTX_UNKNOWN(reason);
++	}
++}
++
++
++/* -- IOCTLs. --------------------------------------------------------------- */
++
++static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
++				    struct sdtx_base_info __user *buf)
++{
++	struct ssam_bas_base_info raw;
++	struct sdtx_base_info info;
++	int status;
++
++	status = ssam_bas_get_base(ddev->ctrl, &raw);
++	if (status < 0)
++		return status;
++
++	info.state = sdtx_translate_base_state(ddev, raw.state);
++	info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);
++
++	if (copy_to_user(buf, &info, sizeof(info)))
++		return -EFAULT;
++
++	return 0;
++}
++
++static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
++{
++	u8 mode;
++	int status;
++
++	status = ssam_bas_get_device_mode(ddev->ctrl, &mode);
++	if (status < 0)
++		return status;
++
++	return put_user(mode, buf);
++}
++
++static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
++{
++	u8 latch;
++	int status;
++
++	status = ssam_bas_get_latch_status(ddev->ctrl, &latch);
++	if (status < 0)
++		return status;
++
++	return put_user(sdtx_translate_latch_status(ddev, latch), buf);
++}
++
++static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd,
++				unsigned long arg)
++{
++	struct sdtx_device *ddev = client->ddev;
++
++	switch (cmd) {
++	case SDTX_IOCTL_EVENTS_ENABLE:
++		set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
++		return 0;
++
++	case SDTX_IOCTL_EVENTS_DISABLE:
++		clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
++		return 0;
++
++	case SDTX_IOCTL_LATCH_LOCK:
++		return ssam_bas_latch_lock(ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_UNLOCK:
++		return ssam_bas_latch_unlock(ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_REQUEST:
++		return ssam_bas_latch_request(ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_CONFIRM:
++		return ssam_bas_latch_confirm(ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_HEARTBEAT:
++		return ssam_bas_latch_heartbeat(ddev->ctrl);
++
++	case SDTX_IOCTL_LATCH_CANCEL:
++		return ssam_bas_latch_cancel(ddev->ctrl);
++
++	case SDTX_IOCTL_GET_BASE_INFO:
++		return sdtx_ioctl_get_base_info(ddev,
++				(struct sdtx_base_info __user *)arg);
++
++	case SDTX_IOCTL_GET_DEVICE_MODE:
++		return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);
++
++	case SDTX_IOCTL_GET_LATCH_STATUS:
++		return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
++
++	default:
++		return -EINVAL;
++	}
++}
++
++static long surface_dtx_ioctl(struct file *file, unsigned int cmd,
++			      unsigned long arg)
++{
++	struct sdtx_client *client = file->private_data;
++	long status;
++
++	if (down_read_killable(&client->ddev->lock))
++		return -ERESTARTSYS;
++
++	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
++		up_read(&client->ddev->lock);
++		return -ENODEV;
++	}
++
++	status = __surface_dtx_ioctl(client, cmd, arg);
++
++	up_read(&client->ddev->lock);
++	return status;
++}
++
++
++/* -- File operations. ------------------------------------------------------ */
++
++static int surface_dtx_open(struct inode *inode, struct file *file)
++{
++	struct sdtx_device *ddev;
++	struct sdtx_client *client;
++
++	ddev = container_of(file->private_data, struct sdtx_device, mdev);
++
++	// initialize client
++	client = kzalloc(sizeof(*client), GFP_KERNEL);
++	if (!client)
++		return -ENOMEM;
++
++	client->ddev = sdtx_device_get(ddev);
++
++	INIT_LIST_HEAD(&client->node);
++
++	mutex_init(&client->read_lock);
++	INIT_KFIFO(client->buffer);
++
++	file->private_data = client;
++
++	// attach client
++	down_write(&ddev->client_lock);
++
++	// do not add a new client if the device has been shut down
++	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
++		up_write(&ddev->client_lock);
++		sdtx_device_put(client->ddev);
++		kfree(client);
++		return -ENODEV;
++	}
++
++	list_add_tail(&client->node, &ddev->client_list);
++	up_write(&ddev->client_lock);
++
++	stream_open(inode, file);
++	return 0;
++}
++
++static int surface_dtx_release(struct inode *inode, struct file *file)
++{
++	struct sdtx_client *client = file->private_data;
++
++	// detach client
++	down_write(&client->ddev->client_lock);
++	list_del(&client->node);
++	up_write(&client->ddev->client_lock);
++
++	sdtx_device_put(client->ddev);
++	kfree(client);
++
++	return 0;
++}
++
++static ssize_t surface_dtx_read(struct file *file, char __user *buf,
++				size_t count, loff_t *offs)
++{
++	struct sdtx_client *client = file->private_data;
++	struct sdtx_device *ddev = client->ddev;
++	unsigned int copied;
++	int status = 0;
++
++	if (down_read_killable(&ddev->lock))
++		return -ERESTARTSYS;
++
++	// make sure we're not shut down
++	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
++		up_read(&ddev->lock);
++		return -ENODEV;
++	}
++
++	do {
++		// check availability, wait if necessary
++		if (kfifo_is_empty(&client->buffer)) {
++			up_read(&ddev->lock);
++
++			if (file->f_flags & O_NONBLOCK)
++				return -EAGAIN;
++
++			status = wait_event_interruptible(ddev->waitq,
++					!kfifo_is_empty(&client->buffer)
++					|| test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
++						    &ddev->flags));
++			if (status < 0)
++				return status;
++
++			if (down_read_killable(&client->ddev->lock))
++				return -ERESTARTSYS;
++
++			// need to check that we're not shut down again
++			if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
++				up_read(&ddev->lock);
++				return -ENODEV;
++			}
++		}
++
++		// try to read from fifo
++		if (mutex_lock_interruptible(&client->read_lock)) {
++			up_read(&ddev->lock);
++			return -ERESTARTSYS;
++		}
++
++		status = kfifo_to_user(&client->buffer, buf, count, &copied);
++		mutex_unlock(&client->read_lock);
++
++		if (status < 0) {
++			up_read(&ddev->lock);
++			return status;
++		}
++
++		// we might not have gotten anything, check this here
++		if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
++			up_read(&ddev->lock);
++			return -EAGAIN;
++		}
++	} while (copied == 0);
++
++	up_read(&ddev->lock);
++	return copied;
++}
++
++static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
++{
++	struct sdtx_client *client = file->private_data;
++	__poll_t events = 0;
++
++	if (down_read_killable(&client->ddev->lock))
++		return EPOLLERR;	// poll returns a __poll_t mask, never -ERESTARTSYS
++
++	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
++		up_read(&client->ddev->lock);
++		return EPOLLHUP | EPOLLERR;
++	}
++
++	poll_wait(file, &client->ddev->waitq, pt);
++
++	if (!kfifo_is_empty(&client->buffer))
++		events |= EPOLLIN | EPOLLRDNORM;
++
++	up_read(&client->ddev->lock);
++	return events;
++}
++
++static int surface_dtx_fasync(int fd, struct file *file, int on)
++{
++	struct sdtx_client *client = file->private_data;
++
++	return fasync_helper(fd, file, on, &client->fasync);
++}
++
++static const struct file_operations surface_dtx_fops = {
++	.owner          = THIS_MODULE,
++	.open           = surface_dtx_open,
++	.release        = surface_dtx_release,
++	.read           = surface_dtx_read,
++	.poll           = surface_dtx_poll,
++	.fasync         = surface_dtx_fasync,
++	.unlocked_ioctl = surface_dtx_ioctl,
++	.compat_ioctl   = surface_dtx_ioctl,
++	.llseek         = no_llseek,
++};
++
++
++/* -- Event handling/forwarding. -------------------------------------------- */
++
++/*
++ * The device operation mode is not immediately updated on the EC when the
++ * base has been connected, i.e. querying the device mode inside the
++ * connection event callback yields an outdated value. Thus, we can only
++ * determine the new tablet-mode switch and device mode values after some
++ * time.
++ *
++ * These delays have been chosen by experimenting. We first delay on connect
++ * events, then check and validate the device mode against the base state and
++ * if invalid delay again by the "recheck" delay.
++ */
++#define SDTX_DEVICE_MODE_DELAY_CONNECT	msecs_to_jiffies(100)
++#define SDTX_DEVICE_MODE_DELAY_RECHECK	msecs_to_jiffies(100)
++
++static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
++
++
++struct sdtx_status_event {
++	struct sdtx_event e;
++	__u16 v;
++} __packed;
++
++struct sdtx_base_info_event {
++	struct sdtx_event e;
++	struct sdtx_base_info v;
++} __packed;
++
++union sdtx_generic_event {
++	struct sdtx_event common;
++	struct sdtx_status_event status;
++	struct sdtx_base_info_event base;
++};
++
++/* Must be executed with ddev->write_lock held. */
++static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
++{
++	const size_t len = sizeof(struct sdtx_event) + evt->length;
++	struct sdtx_client *client;
++
++	down_read(&ddev->client_lock);
++	list_for_each_entry(client, &ddev->client_list, node) {
++		if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
++			continue;
++
++		if (likely(kfifo_avail(&client->buffer) >= len))
++			kfifo_in(&client->buffer, (const u8 *)evt, len);
++		else
++			dev_warn(ddev->dev, "event buffer overrun\n");
++
++		kill_fasync(&client->fasync, SIGIO, POLL_IN);
++	}
++	up_read(&ddev->client_lock);
++
++	wake_up_interruptible(&ddev->waitq);
++}
++
++static u32 sdtx_notifier(struct ssam_event_notifier *nf,
++			 const struct ssam_event *in)
++{
++	struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
++	union sdtx_generic_event event;
++	size_t len;
++
++	// validate event payload length
++	switch (in->command_id) {
++	case SAM_EVENT_CID_DTX_CONNECTION:
++		len = 2;
++		break;
++
++	case SAM_EVENT_CID_DTX_REQUEST:
++		len = 0;
++		break;
++
++	case SAM_EVENT_CID_DTX_CANCEL:
++		len = 1;
++		break;
++
++	case SAM_EVENT_CID_DTX_LATCH_STATUS:
++		len = 1;
++		break;
++
++	default:
++		return 0;
++	}
++
++	if (in->length != len) {
++		dev_err(ddev->dev, "unexpected payload size for event 0x%02x: "
++			"got %u, expected %zu", in->command_id, in->length, len);
++		return 0;
++	}
++
++	mutex_lock(&ddev->write_lock);
++
++	// translate event
++	switch (in->command_id) {
++	case SAM_EVENT_CID_DTX_CONNECTION:
++		clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
++
++		// if state has not changed: do not send new event
++		if (ddev->state.base.state == in->data[0]
++		    && ddev->state.base.base_id == in->data[1])
++			goto out;
++
++		ddev->state.base.state = in->data[0];
++		ddev->state.base.base_id = in->data[1];
++
++		event.base.e.length = sizeof(struct sdtx_base_info);
++		event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
++		event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
++		event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
++		break;
++
++	case SAM_EVENT_CID_DTX_REQUEST:
++		event.common.code = SDTX_EVENT_REQUEST;
++		event.common.length = 0;
++		break;
++
++	case SAM_EVENT_CID_DTX_CANCEL:
++		event.status.e.length = sizeof(u16);
++		event.status.e.code = SDTX_EVENT_CANCEL;
++		event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
++		break;
++
++	case SAM_EVENT_CID_DTX_LATCH_STATUS:
++		clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
++
++		// if state has not changed: do not send new event
++		if (ddev->state.latch_status == in->data[0])
++			goto out;
++
++		ddev->state.latch_status = in->data[0];
++
++		event.status.e.length = sizeof(u16);
++		event.status.e.code = SDTX_EVENT_LATCH_STATUS;
++		event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
++		break;
++	}
++
++	sdtx_push_event(ddev, &event.common);
++
++	// update device mode on base connection change
++	if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
++		unsigned long delay;
++
++		delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
++		sdtx_update_device_mode(ddev, delay);
++	}
++
++out:
++	mutex_unlock(&ddev->write_lock);
++	return SSAM_NOTIF_HANDLED;
++}
++
++
++/* -- State update functions. ----------------------------------------------- */
++
++static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
++{
++	return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED)
++			&& (mode == SDTX_DEVICE_MODE_TABLET))
++		|| ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS)
++			&& (mode != SDTX_DEVICE_MODE_TABLET));
++}
++
++static void sdtx_device_mode_workfn(struct work_struct *work)
++{
++	struct sdtx_device *ddev;
++	struct sdtx_status_event event;
++	struct ssam_bas_base_info base;
++	int status, tablet;
++	u8 mode;
++
++	ddev = container_of(work, struct sdtx_device, mode_work.work);
++
++	// get operation mode
++	status = ssam_bas_get_device_mode(ddev->ctrl, &mode);
++	if (status) {
++		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
++		return;
++	}
++
++	// get base info
++	status = ssam_bas_get_base(ddev->ctrl, &base);
++	if (status) {
++		dev_err(ddev->dev, "failed to get base info: %d\n", status);
++		return;
++	}
++
++	/*
++	 * In some cases (specifically when attaching the base), the device
++	 * mode isn't updated right away. Thus we check if the device mode
++	 * makes sense for the given base state and try again later if it
++	 * doesn't.
++	 */
++	if (sdtx_device_mode_invalid(mode, base.state)) {
++		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
++		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
++		return;
++	}
++
++	mutex_lock(&ddev->write_lock);
++	clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
++
++	// avoid sending duplicate device-mode events
++	if (ddev->state.device_mode == mode) {
++		mutex_unlock(&ddev->write_lock);
++		return;
++	}
++
++	ddev->state.device_mode = mode;
++
++	event.e.length = sizeof(u16);
++	event.e.code = SDTX_EVENT_DEVICE_MODE;
++	event.v = mode;
++
++	sdtx_push_event(ddev, &event.e);
++
++	// send SW_TABLET_MODE event
++	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
++	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
++	input_sync(ddev->mode_switch);
++
++	mutex_unlock(&ddev->write_lock);
++}
++
++static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
++{
++	schedule_delayed_work(&ddev->mode_work, delay);
++}
++
++
++static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
++					    struct ssam_bas_base_info info)
++{
++	struct sdtx_base_info_event event;
++
++	// prevent duplicate events
++	if (ddev->state.base.state == info.state
++	    && ddev->state.base.base_id == info.base_id)
++		return;
++
++	ddev->state.base = info;
++
++	event.e.length = sizeof(struct sdtx_base_info);
++	event.e.code = SDTX_EVENT_BASE_CONNECTION;
++	event.v.state = sdtx_translate_base_state(ddev, info.state);
++	event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);
++
++	sdtx_push_event(ddev, &event.e);
++}
++
++static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
++{
++	struct sdtx_status_event event;
++	int tablet;
++
++	/*
++	 * Note: This function must be called after updating the base state
++	 * via __sdtx_device_state_update_base(), as we rely on the updated
++	 * base state value in the validity check below.
++	 */
++
++	if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
++		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
++		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
++		return;
++	}
++
++	// prevent duplicate events
++	if (ddev->state.device_mode == mode)
++		return;
++
++	ddev->state.device_mode = mode;
++
++	// send event
++	event.e.length = sizeof(u16);
++	event.e.code = SDTX_EVENT_DEVICE_MODE;
++	event.v = mode;
++
++	sdtx_push_event(ddev, &event.e);
++
++	// send SW_TABLET_MODE event
++	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
++	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
++	input_sync(ddev->mode_switch);
++}
++
++static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
++{
++	struct sdtx_status_event event;
++
++	// prevent duplicate events
++	if (ddev->state.latch_status == status)
++		return;
++
++	ddev->state.latch_status = status;
++
++	event.e.length = sizeof(u16);		// status event payload is a u16, not base info
++	event.e.code = SDTX_EVENT_LATCH_STATUS;	// was mislabeled as a base-connection event
++	event.v = sdtx_translate_latch_status(ddev, status);
++
++	sdtx_push_event(ddev, &event.e);
++}
++
++static void sdtx_device_state_workfn(struct work_struct *work)
++{
++	struct sdtx_device *ddev;
++	struct ssam_bas_base_info base;
++	u8 mode, latch;
++	int status;
++
++	ddev = container_of(work, struct sdtx_device, state_work.work);
++
++	// mark everything (base, mode, latch) as dirty before querying
++	set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
++	set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
++	set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
++
++	/*
++	 * Ensure that the state gets marked as dirty before continuing to
++	 * query it. Necessary to ensure that clear_bit() calls in
++	 * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
++	 * bits if an event is received while updating the state here.
++	 */
++	smp_mb__after_atomic();
++
++	status = ssam_bas_get_base(ddev->ctrl, &base);
++	if (status) {
++		dev_err(ddev->dev, "failed to get base state: %d\n", status);
++		return;
++	}
++
++	status = ssam_bas_get_device_mode(ddev->ctrl, &mode);
++	if (status) {
++		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
++		return;
++	}
++
++	status = ssam_bas_get_latch_status(ddev->ctrl, &latch);
++	if (status) {
++		dev_err(ddev->dev, "failed to get latch status: %d\n", status);
++		return;
++	}
++
++	mutex_lock(&ddev->write_lock);
++
++	/*
++	 * If the respective dirty-bit has been cleared, an event has been
++	 * received, updating this state. The queried state may thus be out of
++	 * date. At this point, we can safely assume that the state provided
++	 * by the event is either up to date, or we're about to receive
++	 * another event updating it.
++	 */
++
++	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
++		__sdtx_device_state_update_base(ddev, base);
++
++	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
++		__sdtx_device_state_update_mode(ddev, mode);
++
++	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
++		__sdtx_device_state_update_latch(ddev, latch);
++
++	mutex_unlock(&ddev->write_lock);
++}
++
++static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
++{
++	schedule_delayed_work(&ddev->state_work, delay);
++}
++
++
++/* -- Common device initialization. ----------------------------------------- */
++
++static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
++			    struct ssam_controller *ctrl)
++{
++	int status, tablet_mode;
++
++	// basic initialization
++	kref_init(&ddev->kref);
++	ddev->dev = dev;
++	ddev->ctrl = ctrl;
++
++	ddev->mdev.minor = MISC_DYNAMIC_MINOR;
++	ddev->mdev.name = "surface_dtx";
++	ddev->mdev.nodename = "surface/dtx";
++	ddev->mdev.fops = &surface_dtx_fops;
++
++	ddev->notif.base.priority = 1;
++	ddev->notif.base.fn = sdtx_notifier;
++	ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
++	ddev->notif.event.id.instance = 0;
++	ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	init_waitqueue_head(&ddev->waitq);
++	mutex_init(&ddev->write_lock);
++	init_rwsem(&ddev->client_lock);
++	INIT_LIST_HEAD(&ddev->client_list);
++
++	INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
++	INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);
++
++	/*
++	 * Get current device state. We want to guarantee that events are only
++	 * sent when state actually changes. Thus we cannot use special
++	 * "uninitialized" values, as that would cause problems when manually
++	 * querying the state in surface_dtx_pm_complete(). I.e. we would not
++	 * be able to detect state changes there if no change event has been
++	 * received between driver initialization and first device suspension.
++	 *
++	 * Note that we also need to do this before registering the event
++	 * notifier, as that may access the state values.
++	 */
++	status = ssam_bas_get_base(ddev->ctrl, &ddev->state.base);
++	if (status)
++		return status;
++
++	status = ssam_bas_get_device_mode(ddev->ctrl, &ddev->state.device_mode);
++	if (status)
++		return status;
++
++	status = ssam_bas_get_latch_status(ddev->ctrl, &ddev->state.latch_status);
++	if (status)
++		return status;
++
++	// set up tablet mode switch
++	ddev->mode_switch = input_allocate_device();
++	if (!ddev->mode_switch)
++		return -ENOMEM;
++
++	ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
++	ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
++	ddev->mode_switch->id.bustype = BUS_HOST;
++	ddev->mode_switch->dev.parent = ddev->dev;
++
++	tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
++	input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
++	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);
++
++	status = input_register_device(ddev->mode_switch);
++	if (status) {
++		input_free_device(ddev->mode_switch);
++		return status;
++	}
++
++	// set up event notifier
++	status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
++	if (status)
++		goto err_notif;
++
++	// register miscdevice
++	status = misc_register(&ddev->mdev);
++	if (status)
++		goto err_mdev;
++
++	/*
++	 * Update device state in case it has changed between getting the
++	 * initial mode and registering the event notifier.
++	 */
++	sdtx_update_device_state(ddev, 0);
++	return 0;
++
++err_mdev:	// misc_register failed: tear down notifier, then fall through
++	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
++	cancel_delayed_work_sync(&ddev->mode_work);
++err_notif:	// notifier registration failed: only input device needs cleanup
++	input_unregister_device(ddev->mode_switch);
++	return status;
++}
++
++static struct sdtx_device *sdtx_device_setup(struct device *dev,
++					     struct ssam_controller *ctrl)
++{
++	struct sdtx_device *ddev;
++	int status;
++
++	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
++	if (!ddev)
++		return ERR_PTR(-ENOMEM);
++
++	status = sdtx_device_init(ddev, dev, ctrl);
++	if (status) {
++		kfree(ddev);
++		return ERR_PTR(status);
++	}
++
++	return ddev;
++}
++
++static void sdtx_device_destroy(struct sdtx_device *ddev)
++{
++	struct sdtx_client *client;
++
++	// disable notifiers, prevent new events from arriving
++	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
++
++	// stop mode_work, prevent access to mode_switch
++	cancel_delayed_work_sync(&ddev->mode_work);
++
++	// with mode_work canceled, we can unregister the mode_switch
++	input_unregister_device(ddev->mode_switch);
++
++	/*
++	 * Mark device as shut-down. Prevent new clients from being added and
++	 * new operations from being executed.
++	 */
++	set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);
++
++	// wake up async clients
++	down_write(&ddev->client_lock);
++	list_for_each_entry(client, &ddev->client_list, node) {
++		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
++	}
++	up_write(&ddev->client_lock);
++
++	// wake up blocking clients
++	wake_up_interruptible(&ddev->waitq);
++
++	/*
++	 * Wait for clients to finish their current operation. After this, the
++	 * controller and device references are guaranteed to be no longer in
++	 * use.
++	 */
++	down_write(&ddev->lock);
++	ddev->dev = NULL;
++	ddev->ctrl = NULL;
++	up_write(&ddev->lock);
++
++	// finally remove the misc-device
++	misc_deregister(&ddev->mdev);
++
++	/*
++	 * We're now guaranteed that sdtx_device_open() won't be called any
++	 * more, so we can now drop our reference.
++	 */
++	sdtx_device_put(ddev);
++}
++
++
++/* -- PM ops. --------------------------------------------------------------- */
++
++#ifdef CONFIG_PM_SLEEP
++
++static void surface_dtx_pm_complete(struct device *dev)
++{
++	struct sdtx_device *ddev = dev_get_drvdata(dev);
++
++	/*
++	 * Normally, the EC will store events while suspended (i.e. in
++	 * display-off state) and release them when resumed (i.e. transitioned
++	 * to display-on state). During hibernation, however, the EC will be
++	 * shut down and does not store events. Furthermore, events might be
++	 * dropped during prolonged suspension (it is currently unknown how
++	 * big this event buffer is and how it behaves on overruns).
++	 *
++	 * To prevent any problems, we update the device state here. We do
++	 * this delayed to ensure that any events sent by the EC directly
++	 * after resuming will be handled first. The delay below has been
++	 * chosen (experimentally), so that there should be ample time for
++	 * these events to be handled, before we check and, if necessary,
++	 * update the state.
++	 */
++	sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
++}
++
++static const struct dev_pm_ops surface_dtx_pm_ops = {
++	.complete = surface_dtx_pm_complete,
++};
++
++#else /* CONFIG_PM_SLEEP */
++
++static const struct dev_pm_ops surface_dtx_pm_ops = {};
++
++#endif /* CONFIG_PM_SLEEP */
++
++
++/* -- Platform driver. ------------------------------------------------------ */
++
++static int surface_dtx_platform_probe(struct platform_device *pdev)
++{
++	struct ssam_controller *ctrl;
++	struct sdtx_device *ddev;
++	int status;
++
++	// link to EC
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status)
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++
++	ddev = sdtx_device_setup(&pdev->dev, ctrl);
++	if (IS_ERR(ddev))
++		return PTR_ERR(ddev);
++
++	platform_set_drvdata(pdev, ddev);
++	return 0;
++}
++
++static int surface_dtx_platform_remove(struct platform_device *pdev)
++{
++	sdtx_device_destroy(platform_get_drvdata(pdev));
++	return 0;
++}
++
++static const struct acpi_device_id surface_dtx_acpi_match[] = {
++	{ "MSHW0133", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
++
++static struct platform_driver surface_dtx_platform_driver = {
++	.probe = surface_dtx_platform_probe,
++	.remove = surface_dtx_platform_remove,
++	.driver = {
++		.name = "surface_dtx_pltf",
++		.acpi_match_table = surface_dtx_acpi_match,
++		.pm = &surface_dtx_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++/* -- SSAM device driver. --------------------------------------------------- */
++
++#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
++
++static int surface_dtx_ssam_probe(struct ssam_device *sdev)
++{
++	struct sdtx_device *ddev;
++
++	ddev = sdtx_device_setup(&sdev->dev, sdev->ctrl);
++	if (IS_ERR(ddev))
++		return PTR_ERR(ddev);
++
++	ssam_device_set_drvdata(sdev, ddev);
++	return 0;
++}
++
++static void surface_dtx_ssam_remove(struct ssam_device *sdev)
++{
++	sdtx_device_destroy(ssam_device_get_drvdata(sdev));
++}
++
++static const struct ssam_device_id surface_dtx_ssam_match[] = {
++	{ SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
++
++static struct ssam_device_driver surface_dtx_ssam_driver = {
++	.probe = surface_dtx_ssam_probe,
++	.remove = surface_dtx_ssam_remove,
++	.match_table = surface_dtx_ssam_match,
++	.driver = {
++		.name = "surface_dtx",
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++static int ssam_dtx_driver_register(void)
++{
++	return ssam_device_driver_register(&surface_dtx_ssam_driver);
++}
++
++static void ssam_dtx_driver_unregister(void)
++{
++	ssam_device_driver_unregister(&surface_dtx_ssam_driver);
++}
++
++#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++static int ssam_dtx_driver_register(void)
++{
++	return 0;
++}
++
++static void ssam_dtx_driver_unregister(void)
++{
++}
++
++#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++
++/* -- Module setup. --------------------------------------------------------- */
++
++static int __init surface_dtx_init(void)
++{
++	int status;
++
++	status = ssam_dtx_driver_register();
++	if (status)
++		return status;
++
++	status = platform_driver_register(&surface_dtx_platform_driver);
++	if (status)
++		ssam_dtx_driver_unregister();
++
++	return status;
++}
++module_init(surface_dtx_init);
++
++static void __exit surface_dtx_exit(void)
++{
++	platform_driver_unregister(&surface_dtx_platform_driver);
++	ssam_dtx_driver_unregister();
++}
++module_exit(surface_dtx_exit);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/clients/surface_hid.c b/drivers/misc/surface_aggregator/clients/surface_hid.c
+new file mode 100644
+index 000000000000..567da224e60e
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/surface_hid.c
+@@ -0,0 +1,925 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface System Aggregator Module (SSAM) HID device driver.
++ *
++ * Provides support for HID input devices connected via the Surface System
++ * Aggregator Module.
++ *
++ * Copyright (C) 2019-2020 Blaž Hrastnik <blaz@mxxn.io>,
++ *                         Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/acpi.h>
++#include <linux/hid.h>
++#include <linux/input.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/types.h>
++#include <linux/usb/ch9.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/device.h>
++
++#define SHID_RETRY			3
++#define shid_retry(fn, args...)		ssam_retry(fn, SHID_RETRY, args)
++
++
++enum surface_hid_descriptor_entry {
++	SURFACE_HID_DESC_HID    = 0,
++	SURFACE_HID_DESC_REPORT = 1,
++	SURFACE_HID_DESC_ATTRS  = 2,
++};
++
++struct surface_hid_descriptor {
++	__u8 desc_len;			// = 9
++	__u8 desc_type;			// = HID_DT_HID
++	__le16 hid_version;
++	__u8 country_code;
++	__u8 num_descriptors;		// = 1
++
++	__u8 report_desc_type;		// = HID_DT_REPORT
++	__le16 report_desc_len;
++} __packed;
++
++static_assert(sizeof(struct surface_hid_descriptor) == 9);
++
++struct surface_hid_attributes {
++	__le32 length;
++	__le16 vendor;
++	__le16 product;
++	__le16 version;
++	__u8 _unknown[22];
++} __packed;
++
++static_assert(sizeof(struct surface_hid_attributes) == 32);
++
++struct surface_hid_device;
++
++struct surface_hid_device_ops {
++	int (*get_descriptor)(struct surface_hid_device *shid, u8 entry,
++			      u8 *buf, size_t len);
++	int (*output_report)(struct surface_hid_device *shid, u8 report_id,
++			     u8 *data, size_t len);
++	int (*get_feature_report)(struct surface_hid_device *shid, u8 report_id,
++				  u8 *data, size_t len);
++	int (*set_feature_report)(struct surface_hid_device *shid, u8 report_id,
++				  u8 *data, size_t len);
++};
++
++struct surface_hid_device {
++	struct device *dev;
++	struct ssam_controller *ctrl;
++	struct ssam_device_uid uid;
++
++	struct surface_hid_descriptor hid_desc;
++	struct surface_hid_attributes attrs;
++
++	struct ssam_event_notifier notif;
++	struct hid_device *hid;
++
++	struct surface_hid_device_ops ops;
++};
++
++
++/* -- SAM interface (HID). -------------------------------------------------- */
++
++#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
++
++struct surface_hid_buffer_slice {
++	__u8 entry;
++	__le32 offset;
++	__le32 length;
++	__u8 end;
++	__u8 data[];
++} __packed;
++
++static_assert(sizeof(struct surface_hid_buffer_slice) == 10);
++
++enum surface_hid_cid {
++	SURFACE_HID_CID_OUTPUT_REPORT      = 0x01,
++	SURFACE_HID_CID_GET_FEATURE_REPORT = 0x02,
++	SURFACE_HID_CID_SET_FEATURE_REPORT = 0x03,
++	SURFACE_HID_CID_GET_DESCRIPTOR     = 0x04,
++};
++
++static int ssam_hid_get_descriptor(struct surface_hid_device *shid, u8 entry,
++				   u8 *buf, size_t len)
++{
++	u8 buffer[sizeof(struct surface_hid_buffer_slice) + 0x76];
++	struct surface_hid_buffer_slice *slice;
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++	u32 buffer_len, offset, length;
++	int status;
++
++	/*
++	 * Note: The 0x76 above has been chosen because that's what's used by
++	 * the Windows driver. Together with the header, this leads to a 128
++	 * byte payload in total.
++	 */
++
++	buffer_len = ARRAY_SIZE(buffer) - sizeof(struct surface_hid_buffer_slice);
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.command_id = SURFACE_HID_CID_GET_DESCRIPTOR;
++	rqst.instance_id = shid->uid.instance;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(struct surface_hid_buffer_slice);
++	rqst.payload = buffer;
++
++	rsp.capacity = ARRAY_SIZE(buffer);
++	rsp.pointer = buffer;
++
++	slice = (struct surface_hid_buffer_slice *)buffer;
++	slice->entry = entry;
++	slice->end = 0;
++
++	offset = 0;
++	length = buffer_len;
++
++	while (!slice->end && offset < len) {
++		put_unaligned_le32(offset, &slice->offset);
++		put_unaligned_le32(length, &slice->length);
++
++		rsp.length = 0;
++
++		status = shid_retry(ssam_request_sync_onstack, shid->ctrl,
++				    &rqst, &rsp, sizeof(*slice));
++		if (status)
++			return status;
++
++		offset = get_unaligned_le32(&slice->offset);
++		length = get_unaligned_le32(&slice->length);
++
++		// don't mess stuff up in case we receive garbage
++		if (length > buffer_len || offset > len)
++			return -EPROTO;
++
++		if (offset + length > len)
++			length = len - offset;
++
++		memcpy(buf + offset, &slice->data[0], length);
++
++		offset += length;
++		length = buffer_len;
++	}
++
++	if (offset != len) {
++		dev_err(shid->dev, "unexpected descriptor length: got %u, "
++			"expected %zu\n", offset, len);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++static int ssam_hid_set_raw_report(struct surface_hid_device *shid,
++				   u8 report_id, bool feature, u8 *buf,
++				   size_t len)
++{
++	struct ssam_request rqst;
++	u8 cid;
++
++	if (feature)
++		cid = SURFACE_HID_CID_SET_FEATURE_REPORT;
++	else
++		cid = SURFACE_HID_CID_OUTPUT_REPORT;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.instance_id = shid->uid.instance;
++	rqst.command_id = cid;
++	rqst.flags = 0;
++	rqst.length = len;
++	rqst.payload = buf;
++
++	buf[0] = report_id;
++
++	return shid_retry(ssam_request_sync, shid->ctrl, &rqst, NULL);
++}
++
++static int ssam_hid_get_raw_report(struct surface_hid_device *shid,
++				   u8 report_id, u8 *buf, size_t len)
++{
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.instance_id = shid->uid.instance;
++	rqst.command_id = SURFACE_HID_CID_GET_FEATURE_REPORT;
++	rqst.flags = 0;
++	rqst.length = sizeof(report_id);
++	rqst.payload = &report_id;
++
++	rsp.capacity = len;
++	rsp.length = 0;
++	rsp.pointer = buf;
++
++	return shid_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp,
++			  sizeof(report_id));
++}
++
++static u32 ssam_hid_event_fn(struct ssam_event_notifier *nf,
++			     const struct ssam_event *event)
++{
++	struct surface_hid_device *shid;
++	int status;
++
++	shid = container_of(nf, struct surface_hid_device, notif);
++
++	if (event->command_id != 0x00)
++		return 0;
++
++	status = hid_input_report(shid->hid, HID_INPUT_REPORT,
++				  (u8 *)&event->data[0], event->length, 0);
++
++	return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++}
++
++
++/* -- Transport driver (HID). ----------------------------------------------- */
++
++static int shid_output_report(struct surface_hid_device *shid, u8 report_id,
++			      u8 *data, size_t len)
++{
++	int status;
++
++	status =  ssam_hid_set_raw_report(shid, report_id, false, data, len);
++	return status >= 0 ? len : status;
++}
++
++static int shid_get_feature_report(struct surface_hid_device *shid,
++				   u8 report_id, u8 *data, size_t len)
++{
++	int status;
++
++	status = ssam_hid_get_raw_report(shid, report_id, data, len);
++	return status >= 0 ? len : status;
++}
++
++static int shid_set_feature_report(struct surface_hid_device *shid,
++				   u8 report_id, u8 *data, size_t len)
++{
++	int status;
++
++	status =  ssam_hid_set_raw_report(shid, report_id, true, data, len);
++	return status >= 0 ? len : status;
++}
++
++#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++
++/* -- SAM interface (KBD). -------------------------------------------------- */
++
++#define KBD_FEATURE_REPORT_SIZE		7  // 6 + report ID
++
++enum surface_kbd_cid {
++	SURFACE_KBD_CID_GET_DESCRIPTOR     = 0x00,
++	SURFACE_KBD_CID_SET_CAPSLOCK_LED   = 0x01,
++	SURFACE_KBD_CID_EVT_INPUT_GENERIC  = 0x03,
++	SURFACE_KBD_CID_EVT_INPUT_HOTKEYS  = 0x04,
++	SURFACE_KBD_CID_GET_FEATURE_REPORT = 0x0b,
++};
++
++static int ssam_kbd_get_descriptor(struct surface_hid_device *shid, u8 entry,
++				   u8 *buf, size_t len)
++{
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++	int status;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.command_id = SURFACE_KBD_CID_GET_DESCRIPTOR;
++	rqst.instance_id = shid->uid.instance;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(entry);
++	rqst.payload = &entry;
++
++	rsp.capacity = len;
++	rsp.length = 0;
++	rsp.pointer = buf;
++
++	status = shid_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp,
++			    sizeof(entry));
++	if (status)
++		return status;
++
++	if (rsp.length != len) {
++		dev_err(shid->dev, "invalid descriptor length: got %zu, "
++			"expected %zu\n", rsp.length, len);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++static int ssam_kbd_set_caps_led(struct surface_hid_device *shid, bool value)
++{
++	struct ssam_request rqst;
++	u8 value_u8 = value;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.command_id = SURFACE_KBD_CID_SET_CAPSLOCK_LED;
++	rqst.instance_id = shid->uid.instance;
++	rqst.flags = 0;
++	rqst.length = sizeof(value_u8);
++	rqst.payload = &value_u8;
++
++	return shid_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, NULL,
++			  sizeof(value_u8));
++}
++
++static int ssam_kbd_get_feature_report(struct surface_hid_device *shid, u8 *buf,
++				       size_t len)
++{
++	struct ssam_request rqst;
++	struct ssam_response rsp;
++	u8 payload = 0;
++	int status;
++
++	rqst.target_category = shid->uid.category;
++	rqst.target_id = shid->uid.target;
++	rqst.command_id = SURFACE_KBD_CID_GET_FEATURE_REPORT;
++	rqst.instance_id = shid->uid.instance;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(payload);
++	rqst.payload = &payload;
++
++	rsp.capacity = len;
++	rsp.length = 0;
++	rsp.pointer = buf;
++
++	status = shid_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp,
++			    sizeof(payload));
++	if (status)
++		return status;
++
++	if (rsp.length != len) {
++		dev_err(shid->dev, "invalid feature report length: got %zu, "
++			"expected %zu\n", rsp.length, len);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++static bool ssam_kbd_is_input_event(const struct ssam_event *event)
++{
++	if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_GENERIC)
++		return true;
++
++	if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_HOTKEYS)
++		return true;
++
++	return false;
++}
++
++static u32 ssam_kbd_event_fn(struct ssam_event_notifier *nf,
++				const struct ssam_event *event)
++{
++	struct surface_hid_device *shid;
++	int status;
++
++	shid = container_of(nf, struct surface_hid_device, notif);
++
++	/*
++	 * Check against device UID manually, as registry and device target
++	 * category doesn't line up.
++	 */
++
++	if (shid->uid.category != event->target_category)
++		return 0;
++
++	if (shid->uid.target != event->target_id)
++		return 0;
++
++	if (shid->uid.instance != event->instance_id)
++		return 0;
++
++	if (!ssam_kbd_is_input_event(event))
++		return 0;
++
++	status = hid_input_report(shid->hid, HID_INPUT_REPORT,
++				  (u8 *)&event->data[0], event->length, 0);
++
++	return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
++}
++
++
++/* -- Transport driver (KBD). ----------------------------------------------- */
++
++static int skbd_get_caps_led_value(struct hid_device *hid, u8 report_id,
++				   u8 *data, size_t len)
++{
++	struct hid_field *field;
++	unsigned int offset, size;
++	int i;
++
++	// get led field
++	field = hidinput_get_led_field(hid);
++	if (!field)
++		return -ENOENT;
++
++	// check if we got the correct report
++	if (len != hid_report_len(field->report))
++		return -ENOENT;
++
++	if (report_id != field->report->id)
++		return -ENOENT;
++
++	// get caps lock led index
++	for (i = 0; i < field->report_count; i++)
++		if ((field->usage[i].hid & 0xffff) == 0x02)
++			break;
++
++	if (i == field->report_count)
++		return -ENOENT;
++
++	// extract value
++	size = field->report_size;
++	offset = field->report_offset + i * size;
++	return !!hid_field_extract(hid, data + 1, size, offset);
++}
++
++static int skbd_output_report(struct surface_hid_device *shid, u8 report_id,
++			      u8 *data, size_t len)
++{
++	int caps_led;
++	int status;
++
++	caps_led = skbd_get_caps_led_value(shid->hid, report_id, data, len);
++	if (caps_led < 0)
++		return -EIO;	// only caps output reports are supported
++
++	status = ssam_kbd_set_caps_led(shid, caps_led);
++	if (status < 0)
++		return status;
++
++	return len;
++}
++
++static int skbd_get_feature_report(struct surface_hid_device *shid,
++				   u8 report_id, u8 *data, size_t len)
++{
++	u8 report[KBD_FEATURE_REPORT_SIZE];
++	int status;
++
++	/*
++	 * The keyboard only has a single hard-coded read-only feature report
++	 * of size KBD_FEATURE_REPORT_SIZE. Try to load it and compare its
++	 * report ID against the requested one.
++	 */
++
++	if (len < ARRAY_SIZE(report))
++		return -ENOSPC;
++
++	status = ssam_kbd_get_feature_report(shid, report, ARRAY_SIZE(report));
++	if (status < 0)
++		return status;
++
++	if (report_id != report[0])
++		return -ENOENT;
++
++	memcpy(data, report, ARRAY_SIZE(report));
++	return len;
++}
++
++static int skbd_set_feature_report(struct surface_hid_device *shid,
++				   u8 report_id, u8 *data, size_t len)
++{
++	return -EIO;
++}
++
++
++/* -- Device descriptor access. --------------------------------------------- */
++
++static int surface_hid_load_hid_descriptor(struct surface_hid_device *shid)
++{
++	int status;
++
++	status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
++			(u8 *)&shid->hid_desc, sizeof(shid->hid_desc));
++	if (status)
++		return status;
++
++	if (shid->hid_desc.desc_len != sizeof(shid->hid_desc)) {
++		dev_err(shid->dev, "unexpected HID descriptor length: got %u, "
++			"expected %zu\n", shid->hid_desc.desc_len,
++			sizeof(shid->hid_desc));
++		return -EPROTO;
++	}
++
++	if (shid->hid_desc.desc_type != HID_DT_HID) {
++		dev_err(shid->dev, "unexpected HID descriptor type: got 0x%x, "
++			"expected 0x%x\n", shid->hid_desc.desc_type,
++			HID_DT_HID);
++		return -EPROTO;
++	}
++
++	if (shid->hid_desc.num_descriptors != 1) {
++		dev_err(shid->dev, "unexpected number of descriptors: got %u, "
++			"expected 1\n", shid->hid_desc.num_descriptors);
++		return -EPROTO;
++	}
++
++	if (shid->hid_desc.report_desc_type != HID_DT_REPORT) {
++		dev_err(shid->dev, "unexpected report descriptor type: got 0x%x, "
++			"expected 0x%x\n", shid->hid_desc.report_desc_type,
++			HID_DT_REPORT);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++static int surface_hid_load_device_attributes(struct surface_hid_device *shid)
++{
++	int status;
++
++	status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_ATTRS,
++			(u8 *)&shid->attrs, sizeof(shid->attrs));
++	if (status)
++		return status;
++
++	if (get_unaligned_le32(&shid->attrs.length) != sizeof(shid->attrs)) {
++		dev_err(shid->dev, "unexpected attribute length: got %u, "
++			"expected %zu\n", get_unaligned_le32(&shid->attrs.length),
++			sizeof(shid->attrs));
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++
++/* -- Transport driver (common). -------------------------------------------- */
++
++static int surface_hid_start(struct hid_device *hid)
++{
++	struct surface_hid_device *shid = hid->driver_data;
++
++	return ssam_notifier_register(shid->ctrl, &shid->notif);
++}
++
++static void surface_hid_stop(struct hid_device *hid)
++{
++	struct surface_hid_device *shid = hid->driver_data;
++
++	// Note: This call will log errors for us, so ignore them here.
++	ssam_notifier_unregister(shid->ctrl, &shid->notif);
++}
++
++static int surface_hid_open(struct hid_device *hid)
++{
++	return 0;
++}
++
++static void surface_hid_close(struct hid_device *hid)
++{
++}
++
++static int surface_hid_parse(struct hid_device *hid)
++{
++	struct surface_hid_device *shid = hid->driver_data;
++	size_t len = get_unaligned_le16(&shid->hid_desc.report_desc_len);
++	u8 *buf;
++	int status;
++
++	buf = kzalloc(len, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_REPORT, buf, len);
++	if (!status)
++		status = hid_parse_report(hid, buf, len);
++
++	kfree(buf);
++	return status;
++}
++
++static int surface_hid_raw_request(struct hid_device *hid,
++		unsigned char reportnum, u8 *buf, size_t len,
++		unsigned char rtype, int reqtype)
++{
++	struct surface_hid_device *shid = hid->driver_data;
++
++	if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT)
++		return shid->ops.output_report(shid, reportnum, buf, len);
++
++	else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT)
++		return shid->ops.get_feature_report(shid, reportnum, buf, len);
++
++	else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT)
++		return shid->ops.set_feature_report(shid, reportnum, buf, len);
++
++	return -EIO;
++}
++
++static struct hid_ll_driver surface_hid_ll_driver = {
++	.start       = surface_hid_start,
++	.stop        = surface_hid_stop,
++	.open        = surface_hid_open,
++	.close       = surface_hid_close,
++	.parse       = surface_hid_parse,
++	.raw_request = surface_hid_raw_request,
++};
++
++
++/* -- Common device setup. -------------------------------------------------- */
++
++static int surface_hid_device_add(struct surface_hid_device *shid)
++{
++	int status;
++
++	status = surface_hid_load_hid_descriptor(shid);
++	if (status)
++		return status;
++
++	status = surface_hid_load_device_attributes(shid);
++	if (status)
++		return status;
++
++	shid->hid = hid_allocate_device();
++	if (IS_ERR(shid->hid))
++		return PTR_ERR(shid->hid);
++
++	shid->hid->dev.parent = shid->dev;
++	shid->hid->bus = BUS_HOST;		// TODO: BUS_SURFACE
++	shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
++	shid->hid->product = cpu_to_le16(shid->attrs.product);
++	shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
++	shid->hid->country = shid->hid_desc.country_code;
++
++	snprintf(shid->hid->name, sizeof(shid->hid->name),
++		 "Microsoft Surface %04X:%04X",
++		 shid->hid->vendor, shid->hid->product);
++
++	strlcpy(shid->hid->phys, dev_name(shid->dev), sizeof(shid->hid->phys));
++
++	shid->hid->driver_data = shid;
++	shid->hid->ll_driver = &surface_hid_ll_driver;
++
++	status = hid_add_device(shid->hid);
++	if (status)
++		hid_destroy_device(shid->hid);
++
++	return status;
++}
++
++static void surface_hid_device_destroy(struct surface_hid_device *shid)
++{
++	hid_destroy_device(shid->hid);
++}
++
++
++/* -- PM ops. --------------------------------------------------------------- */
++
++#ifdef CONFIG_PM
++
++static int surface_hid_suspend(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->suspend)
++		return d->hid->driver->suspend(d->hid, PMSG_SUSPEND);
++
++	return 0;
++}
++
++static int surface_hid_resume(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->resume)
++		return d->hid->driver->resume(d->hid);
++
++	return 0;
++}
++
++static int surface_hid_freeze(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->suspend)
++		return d->hid->driver->suspend(d->hid, PMSG_FREEZE);
++
++	return 0;
++}
++
++static int surface_hid_poweroff(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->suspend)
++		return d->hid->driver->suspend(d->hid, PMSG_HIBERNATE);
++
++	return 0;
++}
++
++static int surface_hid_restore(struct device *dev)
++{
++	struct surface_hid_device *d = dev_get_drvdata(dev);
++
++	if (d->hid->driver && d->hid->driver->reset_resume)
++		return d->hid->driver->reset_resume(d->hid);
++
++	return 0;
++}
++
++const struct dev_pm_ops surface_hid_pm_ops = {
++	.freeze   = surface_hid_freeze,
++	.thaw     = surface_hid_resume,
++	.suspend  = surface_hid_suspend,
++	.resume   = surface_hid_resume,
++	.poweroff = surface_hid_poweroff,
++	.restore  = surface_hid_restore,
++};
++
++#else /* CONFIG_PM */
++
++const struct dev_pm_ops surface_hid_pm_ops = { };
++
++#endif /* CONFIG_PM */
++
++
++/* -- Driver setup (HID). --------------------------------------------------- */
++
++#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
++
++static int surface_hid_probe(struct ssam_device *sdev)
++{
++	struct surface_hid_device *shid;
++
++	shid = devm_kzalloc(&sdev->dev, sizeof(*shid), GFP_KERNEL);
++	if (!shid)
++		return -ENOMEM;
++
++	shid->dev = &sdev->dev;
++	shid->ctrl = sdev->ctrl;
++	shid->uid = sdev->uid;
++
++	shid->notif.base.priority = 1;
++	shid->notif.base.fn = ssam_hid_event_fn;
++	shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG;
++	shid->notif.event.id.target_category = sdev->uid.category;
++	shid->notif.event.id.instance = sdev->uid.instance;
++	shid->notif.event.mask = SSAM_EVENT_MASK_STRICT;
++	shid->notif.event.flags = 0;
++
++	shid->ops.get_descriptor = ssam_hid_get_descriptor;
++	shid->ops.output_report = shid_output_report;
++	shid->ops.get_feature_report = shid_get_feature_report;
++	shid->ops.set_feature_report = shid_set_feature_report;
++
++	ssam_device_set_drvdata(sdev, shid);
++	return surface_hid_device_add(shid);
++}
++
++static void surface_hid_remove(struct ssam_device *sdev)
++{
++	surface_hid_device_destroy(ssam_device_get_drvdata(sdev));
++}
++
++static const struct ssam_device_id surface_hid_match[] = {
++	{ SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, surface_hid_match);
++
++static struct ssam_device_driver surface_hid_driver = {
++	.probe = surface_hid_probe,
++	.remove = surface_hid_remove,
++	.match_table = surface_hid_match,
++	.driver = {
++		.name = "surface_hid",
++		.pm = &surface_hid_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++static int surface_hid_driver_register(void)
++{
++	return ssam_device_driver_register(&surface_hid_driver);
++}
++
++static void surface_hid_driver_unregister(void)
++{
++	ssam_device_driver_unregister(&surface_hid_driver);
++}
++
++#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++static int surface_hid_driver_register(void)
++{
++	return 0;
++}
++
++static void surface_hid_driver_unregister(void)
++{
++}
++
++#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
++
++
++/* -- Driver setup (KBD). --------------------------------------------------- */
++
++static int surface_kbd_probe(struct platform_device *pdev)
++{
++	struct ssam_controller *ctrl;
++	struct surface_hid_device *shid;
++	int status;
++
++	// add device link to EC
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status)
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++
++	shid = devm_kzalloc(&pdev->dev, sizeof(*shid), GFP_KERNEL);
++	if (!shid)
++		return -ENOMEM;
++
++	shid->dev = &pdev->dev;
++	shid->ctrl = ctrl;
++
++	shid->uid.domain = SSAM_DOMAIN_SERIALHUB;
++	shid->uid.category = SSAM_SSH_TC_KBD;
++	shid->uid.target = 2;
++	shid->uid.instance = 0;
++	shid->uid.function = 0;
++
++	shid->notif.base.priority = 1;
++	shid->notif.base.fn = ssam_kbd_event_fn;
++	shid->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	shid->notif.event.id.target_category = shid->uid.category;
++	shid->notif.event.id.instance = shid->uid.instance;
++	shid->notif.event.mask = SSAM_EVENT_MASK_NONE;
++	shid->notif.event.flags = 0;
++
++	shid->ops.get_descriptor = ssam_kbd_get_descriptor;
++	shid->ops.output_report = skbd_output_report;
++	shid->ops.get_feature_report = skbd_get_feature_report;
++	shid->ops.set_feature_report = skbd_set_feature_report;
++
++	platform_set_drvdata(pdev, shid);
++	return surface_hid_device_add(shid);
++}
++
++static int surface_kbd_remove(struct platform_device *pdev)
++{
++	surface_hid_device_destroy(platform_get_drvdata(pdev));
++	return 0;
++}
++
++static const struct acpi_device_id surface_kbd_match[] = {
++	{ "MSHW0096" },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, surface_kbd_match);
++
++static struct platform_driver surface_kbd_driver = {
++	.probe = surface_kbd_probe,
++	.remove = surface_kbd_remove,
++	.driver = {
++		.name = "surface_keyboard",
++		.acpi_match_table = surface_kbd_match,
++		.pm = &surface_hid_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++/* -- Module setup. --------------------------------------------------------- */
++
++static int __init surface_hid_init(void)
++{
++	int status;
++
++	status = surface_hid_driver_register();
++	if (status)
++		return status;
++
++	status = platform_driver_register(&surface_kbd_driver);
++	if (status)
++		surface_hid_driver_unregister();
++
++	return status;
++}
++module_init(surface_hid_init);
++
++static void __exit surface_hid_exit(void)
++{
++	platform_driver_unregister(&surface_kbd_driver);
++	surface_hid_driver_unregister();
++}
++module_exit(surface_hid_exit);
++
++MODULE_AUTHOR("Blaž Hrastnik <blaz@mxxn.io>");
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("HID transport-/device-driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/clients/surface_hotplug.c b/drivers/misc/surface_aggregator/clients/surface_hotplug.c
+new file mode 100644
+index 000000000000..f18cc17d019d
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/surface_hotplug.c
+@@ -0,0 +1,1285 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface Book (gen. 2 and later) discrete GPU (dGPU) hot-plug system driver.
++ *
++ * Supports explicit setting of the dGPU power-state on the Surface Books via
++ * a user-space interface. Properly handles dGPU hot-plugging by detaching the
++ * base of the device.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/gpio.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/sysfs.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_acpi_notify.h>
++
++
++// TODO: vgaswitcheroo integration
++
++
++static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix);
++
++
++#define SHPS_DSM_REVISION	1
++#define SHPS_DSM_GPU_ADDRS	0x02
++#define SHPS_DSM_GPU_POWER	0x05
++static const guid_t SHPS_DSM_UUID =
++	GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd,
++		  0x32, 0x0e, 0x10, 0x36, 0x0a);
++
++
++#define SAM_DGPU_TC			0x13
++#define SAM_DGPU_CID_POWERON		0x02
++#define ACPI_SGCP_NOTIFY_POWER_ON	0x81
++
++#define SHPS_DSM_GPU_ADDRS_RP		"RP5_PCIE"
++#define SHPS_DSM_GPU_ADDRS_DGPU		"DGPU_PCIE"
++#define SHPS_PCI_GPU_ADDR_RP			"\\_SB.PCI0.RP13._ADR"
++
++static const struct acpi_gpio_params gpio_base_presence_int = { 0, 0, false };
++static const struct acpi_gpio_params gpio_base_presence     = { 1, 0, false };
++static const struct acpi_gpio_params gpio_dgpu_power_int    = { 2, 0, false };
++static const struct acpi_gpio_params gpio_dgpu_power        = { 3, 0, false };
++static const struct acpi_gpio_params gpio_dgpu_presence_int = { 4, 0, false };
++static const struct acpi_gpio_params gpio_dgpu_presence     = { 5, 0, false };
++
++static const struct acpi_gpio_mapping shps_acpi_gpios[] = {
++	{ "base_presence-int-gpio", &gpio_base_presence_int, 1 },
++	{ "base_presence-gpio",     &gpio_base_presence,     1 },
++	{ "dgpu_power-int-gpio",    &gpio_dgpu_power_int,    1 },
++	{ "dgpu_power-gpio",        &gpio_dgpu_power,        1 },
++	{ "dgpu_presence-int-gpio", &gpio_dgpu_presence_int, 1 },
++	{ "dgpu_presence-gpio",     &gpio_dgpu_presence,     1 },
++	{ },
++};
++
++
++enum shps_dgpu_power {
++	SHPS_DGPU_POWER_OFF      = 0,
++	SHPS_DGPU_POWER_ON       = 1,
++	SHPS_DGPU_POWER_UNKNOWN  = 2,
++};
++
++static const char *shps_dgpu_power_str(enum shps_dgpu_power power)
++{
++	if (power == SHPS_DGPU_POWER_OFF)
++		return "off";
++	else if (power == SHPS_DGPU_POWER_ON)
++		return "on";
++	else if (power == SHPS_DGPU_POWER_UNKNOWN)
++		return "unknown";
++	else
++		return "<invalid>";
++}
++
++enum shps_notification_method {
++	SHPS_NOTIFICATION_METHOD_SAN = 1,
++	SHPS_NOTIFICATION_METHOD_SGCP = 2
++};
++
++struct shps_hardware_traits {
++	enum shps_notification_method notification_method;
++	const char *dgpu_rp_pci_address;
++};
++
++struct shps_driver_data {
++	struct ssam_controller *ctrl;
++	struct platform_device *pdev;
++
++	struct mutex lock;
++	struct pci_dev *dgpu_root_port;
++	struct pci_saved_state *dgpu_root_port_state;
++	struct gpio_desc *gpio_dgpu_power;
++	struct gpio_desc *gpio_dgpu_presence;
++	struct gpio_desc *gpio_base_presence;
++	unsigned int irq_dgpu_presence;
++	unsigned int irq_base_presence;
++	unsigned long state;
++	acpi_handle sgpc_handle;
++	struct shps_hardware_traits hardware_traits;
++
++	struct notifier_block dgpu_nf;
++};
++
++struct shps_hardware_probe {
++	const char *hardware_id;
++	int generation;
++	struct shps_hardware_traits *hardware_traits;
++};
++
++static struct shps_hardware_traits shps_gen1_hwtraits = {
++	.notification_method = SHPS_NOTIFICATION_METHOD_SAN
++};
++
++static struct shps_hardware_traits shps_gen2_hwtraits = {
++	.notification_method = SHPS_NOTIFICATION_METHOD_SGCP,
++	.dgpu_rp_pci_address = SHPS_PCI_GPU_ADDR_RP
++};
++
++static const struct shps_hardware_probe shps_hardware_probe_match[] = {
++	/* Surface Book 3 */
++	{ "MSHW0117", 2, &shps_gen2_hwtraits },
++
++	/* Surface Book 2 (default, must be last entry) */
++	{ NULL, 1, &shps_gen1_hwtraits }
++};
++
++#define SHPS_STATE_BIT_PWRTGT		0	/* desired power state: 1 for on, 0 for off */
++#define SHPS_STATE_BIT_RPPWRON_SYNC	1	/* synchronous/requested power-up in progress  */
++#define SHPS_STATE_BIT_WAKE_ENABLED	2	/* wakeup via base-presence GPIO enabled */
++
++
++#define SHPS_DGPU_PARAM_PERM		0644
++
++enum shps_dgpu_power_mp {
++	SHPS_DGPU_MP_POWER_OFF  = SHPS_DGPU_POWER_OFF,
++	SHPS_DGPU_MP_POWER_ON   = SHPS_DGPU_POWER_ON,
++	SHPS_DGPU_MP_POWER_ASIS = -1,
++
++	__SHPS_DGPU_MP_POWER_START = -1,
++	__SHPS_DGPU_MP_POWER_END   = 1,
++};
++
++static int param_dgpu_power_set(const char *val, const struct kernel_param *kp)
++{
++	int power = SHPS_DGPU_MP_POWER_OFF;
++	int status;
++
++	status = kstrtoint(val, 0, &power);
++	if (status)
++		return status;
++
++	if (power < __SHPS_DGPU_MP_POWER_START || power > __SHPS_DGPU_MP_POWER_END)
++		return -EINVAL;
++
++	return param_set_int(val, kp);
++}
++
++static const struct kernel_param_ops param_dgpu_power_ops = {
++	.set = param_dgpu_power_set,
++	.get = param_get_int,
++};
++
++static int param_dgpu_power_init = SHPS_DGPU_MP_POWER_OFF;
++static int param_dgpu_power_exit = SHPS_DGPU_MP_POWER_ON;
++static int param_dgpu_power_susp = SHPS_DGPU_MP_POWER_ASIS;
++static bool param_dtx_latch = true;
++
++module_param_cb(dgpu_power_init, &param_dgpu_power_ops, &param_dgpu_power_init, SHPS_DGPU_PARAM_PERM);
++module_param_cb(dgpu_power_exit, &param_dgpu_power_ops, &param_dgpu_power_exit, SHPS_DGPU_PARAM_PERM);
++module_param_cb(dgpu_power_susp, &param_dgpu_power_ops, &param_dgpu_power_susp, SHPS_DGPU_PARAM_PERM);
++module_param_named(dtx_latch, param_dtx_latch, bool, SHPS_DGPU_PARAM_PERM);
++
++MODULE_PARM_DESC(dgpu_power_init, "dGPU power state to be set on init (-1: as-is / 0: off / 1: on, default: off)");
++MODULE_PARM_DESC(dgpu_power_exit, "dGPU power state to be set on exit (-1: as-is / 0: off / 1: on, default: on)");
++MODULE_PARM_DESC(dgpu_power_susp, "dGPU power state to be set on suspend (-1: as-is / 0: off / 1: on, default: as-is)");
++MODULE_PARM_DESC(dtx_latch, "lock/unlock DTX base latch in accordance to power-state (Y/n)");
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x06,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.target_id       = 0x01,
++	.command_id      = 0x07,
++	.instance_id     = 0x00,
++});
++
++static int shps_dgpu_dsm_get_pci_addr_from_adr(struct platform_device *pdev, const char *entry) {
++	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
++	acpi_status status;
++	struct acpi_object_list input;
++	union acpi_object input_args[0];
++	u64 device_addr;
++	u8 bus, dev, fun;
++
++	input.count = 0;
++	input.pointer = input_args;
++
++
++	status = acpi_evaluate_integer(handle, (acpi_string)entry, &input, &device_addr);
++	if (ACPI_FAILURE(status))
++		return -ENODEV;
++
++	bus = 0;
++	dev = (device_addr & 0xFF0000) >> 16;
++	fun = device_addr & 0xFF;
++
++	dev_info(&pdev->dev, "found pci device at bus = %d, dev = %x, fun = %x\n",
++		 (u32)bus, (u32)dev, (u32)fun);
++
++	return bus << 8 | PCI_DEVFN(dev, fun);
++}
++
++static int shps_dgpu_dsm_get_pci_addr_from_dsm(struct platform_device *pdev, const char *entry)
++{
++	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
++	union acpi_object *result;
++	union acpi_object *e0;
++	union acpi_object *e1;
++	union acpi_object *e2;
++	u64 device_addr = 0;
++	u8 bus, dev, fun;
++	int i;
++
++
++	result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION,
++					 SHPS_DSM_GPU_ADDRS, NULL, ACPI_TYPE_PACKAGE);
++	if (!result)
++		return -EFAULT;
++
++	// three entries per device: name, address, <integer>
++	for (i = 0; i + 2 < result->package.count; i += 3) {
++		e0 = &result->package.elements[i];
++		e1 = &result->package.elements[i + 1];
++		e2 = &result->package.elements[i + 2];
++
++		if (e0->type != ACPI_TYPE_STRING) {
++			ACPI_FREE(result);
++			return -EIO;
++		}
++
++		if (e1->type != ACPI_TYPE_INTEGER) {
++			ACPI_FREE(result);
++			return -EIO;
++		}
++
++		if (e2->type != ACPI_TYPE_INTEGER) {
++			ACPI_FREE(result);
++			return -EIO;
++		}
++
++		if (strncmp(e0->string.pointer, entry, 64) == 0)
++			device_addr = e1->integer.value;
++	}
++
++	ACPI_FREE(result);
++	if (device_addr == 0)
++		return -ENODEV;
++
++
++	// convert address
++	bus = (device_addr & 0x0FF00000) >> 20;
++	dev = (device_addr & 0x000F8000) >> 15;
++	fun = (device_addr & 0x00007000) >> 12;
++
++	return bus << 8 | PCI_DEVFN(dev, fun);
++}
++
++static struct pci_dev *shps_dgpu_dsm_get_pci_dev(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct pci_dev *dev;
++	int addr;
++
++
++	if (drvdata->hardware_traits.dgpu_rp_pci_address) {
++		addr = shps_dgpu_dsm_get_pci_addr_from_adr(pdev, drvdata->hardware_traits.dgpu_rp_pci_address);
++	} else {
++		addr = shps_dgpu_dsm_get_pci_addr_from_dsm(pdev, SHPS_DSM_GPU_ADDRS_RP);
++	}
++
++	if (addr < 0)
++		return ERR_PTR(addr);
++
++	dev = pci_get_domain_bus_and_slot(0, (addr & 0xFF00) >> 8, addr & 0xFF);
++	return dev ? dev : ERR_PTR(-ENODEV);
++}
++
++
++static int shps_dgpu_dsm_get_power_unlocked(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct gpio_desc *gpio = drvdata->gpio_dgpu_power;
++	int status;
++
++	status = gpiod_get_value_cansleep(gpio);
++	if (status < 0)
++		return status;
++
++	return status == 0 ? SHPS_DGPU_POWER_OFF : SHPS_DGPU_POWER_ON;
++}
++
++static int shps_dgpu_dsm_get_power(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	mutex_lock(&drvdata->lock);
++	status = shps_dgpu_dsm_get_power_unlocked(pdev);
++	mutex_unlock(&drvdata->lock);
++
++	return status;
++}
++
++static int __shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
++{
++	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
++	union acpi_object *result;
++	union acpi_object param;
++
++	dev_info(&pdev->dev, "setting dGPU direct power to \'%s\'\n", shps_dgpu_power_str(power));
++
++	param.type = ACPI_TYPE_INTEGER;
++	param.integer.value = power == SHPS_DGPU_POWER_ON;
++
++	result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION,
++					 SHPS_DSM_GPU_POWER, &param, ACPI_TYPE_BUFFER);
++	if (!result)
++		return -EFAULT;
++
++	// check for the expected result
++	if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
++		ACPI_FREE(result);
++		return -EIO;
++	}
++
++	ACPI_FREE(result);
++	return 0;
++}
++
++static int shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
++{
++	int status;
++
++	if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF)
++		return -EINVAL;
++
++	status = shps_dgpu_dsm_get_power_unlocked(pdev);
++	if (status < 0)
++		return status;
++	if (status == power)
++		return 0;
++
++	return __shps_dgpu_dsm_set_power_unlocked(pdev, power);
++}
++
++static int shps_dgpu_dsm_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	mutex_lock(&drvdata->lock);
++	status = shps_dgpu_dsm_set_power_unlocked(pdev, power);
++	mutex_unlock(&drvdata->lock);
++
++	return status;
++}
++
++
++static bool shps_rp_link_up(struct pci_dev *rp)
++{
++	u16 lnksta = 0, sltsta = 0;
++
++	pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
++	pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
++
++	return (lnksta & PCI_EXP_LNKSTA_DLLLA) || (sltsta & PCI_EXP_SLTSTA_PDS);
++}
++
++
++static int shps_dgpu_rp_get_power_unlocked(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct pci_dev *rp = drvdata->dgpu_root_port;
++
++	if (rp->current_state == PCI_D3hot || rp->current_state == PCI_D3cold)
++		return SHPS_DGPU_POWER_OFF;
++	else if (rp->current_state == PCI_UNKNOWN || rp->current_state == PCI_POWER_ERROR)
++		return SHPS_DGPU_POWER_UNKNOWN;
++	else
++		return SHPS_DGPU_POWER_ON;
++}
++
++static int shps_dgpu_rp_get_power(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	mutex_lock(&drvdata->lock);
++	status = shps_dgpu_rp_get_power_unlocked(pdev);
++	mutex_unlock(&drvdata->lock);
++
++	return status;
++}
++
++static int __shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct pci_dev *rp = drvdata->dgpu_root_port;
++	int status, i;
++
++	dev_info(&pdev->dev, "setting dGPU power state to \'%s\'\n", shps_dgpu_power_str(power));
++
++	dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.1");
++	if (power == SHPS_DGPU_POWER_ON) {
++		set_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state);
++		pci_set_power_state(rp, PCI_D0);
++
++		if (drvdata->dgpu_root_port_state)
++			pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
++
++		pci_restore_state(rp);
++
++		if (!pci_is_enabled(rp))
++			pci_enable_device(rp);
++
++		pci_set_master(rp);
++		clear_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state);
++
++		set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++	} else {
++		if (!drvdata->dgpu_root_port_state) {
++			pci_save_state(rp);
++			drvdata->dgpu_root_port_state = pci_store_saved_state(rp);
++		}
++
++		/*
++		 * To properly update the hot-plug system we need to "remove" the dGPU
++		 * before disabling it and sending it to D3cold. Following this, we
++		 * need to wait for the link and slot status to actually change.
++		 */
++		status = shps_dgpu_dsm_set_power_unlocked(pdev, SHPS_DGPU_POWER_OFF);
++		if (status)
++			return status;
++
++		for (i = 0; i < 20 && shps_rp_link_up(rp); i++)
++			msleep(50);
++
++		if (shps_rp_link_up(rp))
++			dev_err(&pdev->dev, "dGPU removal via DSM timed out\n");
++
++		pci_clear_master(rp);
++
++		if (pci_is_enabled(rp))
++			pci_disable_device(rp);
++
++		pci_set_power_state(rp, PCI_D3cold);
++
++		clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++	}
++	dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.2");
++
++	return 0;
++}
++
++static int shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
++{
++	int status;
++
++	if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF)
++		return -EINVAL;
++
++	status = shps_dgpu_rp_get_power_unlocked(pdev);
++	if (status < 0)
++		return status;
++	if (status == power)
++		return 0;
++
++	return __shps_dgpu_rp_set_power_unlocked(pdev, power);
++}
++
++static int shps_dgpu_rp_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	mutex_lock(&drvdata->lock);
++	status = shps_dgpu_rp_set_power_unlocked(pdev, power);
++	mutex_unlock(&drvdata->lock);
++
++	return status;
++}
++
++
++static int shps_dgpu_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	if (!param_dtx_latch)
++		return shps_dgpu_rp_set_power(pdev, power);
++
++	if (power == SHPS_DGPU_POWER_ON) {
++		status = ssam_bas_latch_lock(drvdata->ctrl);
++		if (status)
++			return status;
++
++		status = shps_dgpu_rp_set_power(pdev, power);
++		if (status)
++			ssam_bas_latch_unlock(drvdata->ctrl);
++
++	} else {
++		status = shps_dgpu_rp_set_power(pdev, power);
++		if (status)
++			return status;
++
++		status = ssam_bas_latch_unlock(drvdata->ctrl);
++	}
++
++	return status;
++}
++
++
++static int shps_dgpu_is_present(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata;
++
++	drvdata = platform_get_drvdata(pdev);
++	return gpiod_get_value_cansleep(drvdata->gpio_dgpu_presence);
++}
++
++
++static ssize_t dgpu_power_show(struct device *dev, struct device_attribute *attr, char *data)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	int power = shps_dgpu_rp_get_power(pdev);
++
++	if (power < 0)
++		return power;
++
++	return sprintf(data, "%s\n", shps_dgpu_power_str(power));
++}
++
++static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr,
++				const char *data, size_t count)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	enum shps_dgpu_power power;
++	bool b = false;
++	int status;
++
++	status = kstrtobool(data, &b);
++	if (status)
++		return status;
++
++	status = shps_dgpu_is_present(pdev);
++	if (status <= 0)
++		return status < 0 ? status : -EPERM;
++
++	power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
++	status = shps_dgpu_set_power(pdev, power);
++
++	return status < 0 ? status : count;
++}
++
++static ssize_t dgpu_power_dsm_show(struct device *dev, struct device_attribute *attr, char *data)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	int power = shps_dgpu_dsm_get_power(pdev);
++
++	if (power < 0)
++		return power;
++
++	return sprintf(data, "%s\n", shps_dgpu_power_str(power));
++}
++
++static ssize_t dgpu_power_dsm_store(struct device *dev, struct device_attribute *attr,
++				    const char *data, size_t count)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	enum shps_dgpu_power power;
++	bool b = false;
++	int status;
++
++	status = kstrtobool(data, &b);
++	if (status)
++		return status;
++
++	status = shps_dgpu_is_present(pdev);
++	if (status <= 0)
++		return status < 0 ? status : -EPERM;
++
++	power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
++	status = shps_dgpu_dsm_set_power(pdev, power);
++
++	return status < 0 ? status : count;
++}
++
++static DEVICE_ATTR_RW(dgpu_power);
++static DEVICE_ATTR_RW(dgpu_power_dsm);
++
++static struct attribute *shps_power_attrs[] = {
++	&dev_attr_dgpu_power.attr,
++	&dev_attr_dgpu_power_dsm.attr,
++	NULL,
++};
++ATTRIBUTE_GROUPS(shps_power);
++
++
++static void dbg_dump_power_states(struct platform_device *pdev, const char *prefix)
++{
++	enum shps_dgpu_power power_dsm;
++	enum shps_dgpu_power power_rp;
++	int status;
++
++	status = shps_dgpu_rp_get_power_unlocked(pdev);
++	if (status < 0)
++		dev_err(&pdev->dev, "%s: failed to get root-port power state: %d\n", prefix, status);
++	power_rp = status;
++
++	status = shps_dgpu_dsm_get_power_unlocked(pdev);
++	if (status < 0)
++		dev_err(&pdev->dev, "%s: failed to get direct power state: %d\n", prefix, status);
++	power_dsm = status;
++
++	dev_dbg(&pdev->dev, "%s: root-port power state: %d\n", prefix, power_rp);
++	dev_dbg(&pdev->dev, "%s: direct power state:    %d\n", prefix, power_dsm);
++}
++
++static void dbg_dump_pciesta(struct platform_device *pdev, const char *prefix)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct pci_dev *rp = drvdata->dgpu_root_port;
++	u16 lnksta, lnksta2, sltsta, sltsta2;
++
++	pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
++	pcie_capability_read_word(rp, PCI_EXP_LNKSTA2, &lnksta2);
++	pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
++	pcie_capability_read_word(rp, PCI_EXP_SLTSTA2, &sltsta2);
++
++	dev_dbg(&pdev->dev, "%s: LNKSTA: 0x%04x\n", prefix, lnksta);
++	dev_dbg(&pdev->dev, "%s: LNKSTA2: 0x%04x\n", prefix, lnksta2);
++	dev_dbg(&pdev->dev, "%s: SLTSTA: 0x%04x\n", prefix, sltsta);
++	dev_dbg(&pdev->dev, "%s: SLTSTA2: 0x%04x\n", prefix, sltsta2);
++}
++
++static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct pci_dev *rp = drvdata->dgpu_root_port;
++
++	dev_dbg(&pdev->dev, "%s: RP power: %d\n", prefix, rp->current_state);
++	dev_dbg(&pdev->dev, "%s: RP state saved: %d\n", prefix, rp->state_saved);
++	dev_dbg(&pdev->dev, "%s: RP state stored: %d\n", prefix, !!drvdata->dgpu_root_port_state);
++	dev_dbg(&pdev->dev, "%s: RP enabled: %d\n", prefix, atomic_read(&rp->enable_cnt));
++	dev_dbg(&pdev->dev, "%s: RP mastered: %d\n", prefix, rp->is_busmaster);
++}
++
++static int shps_pm_prepare(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	bool pwrtgt;
++	int status = 0;
++
++	dbg_dump_power_states(pdev, "shps_pm_prepare");
++
++	if (param_dgpu_power_susp != SHPS_DGPU_MP_POWER_ASIS) {
++		pwrtgt = test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++
++		status = shps_dgpu_set_power(pdev, param_dgpu_power_susp);
++		if (status) {
++			dev_err(&pdev->dev, "failed to power %s dGPU: %d\n",
++				param_dgpu_power_susp == SHPS_DGPU_MP_POWER_OFF ? "off" : "on",
++				status);
++			return status;
++		}
++
++		if (pwrtgt)
++			set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++		else
++			clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++	}
++
++	return 0;
++}
++
++static void shps_pm_complete(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	dbg_dump_power_states(pdev, "shps_pm_complete");
++	dbg_dump_pciesta(pdev, "shps_pm_complete");
++	dbg_dump_drvsta(pdev, "shps_pm_complete.1");
++
++	// update power target, dGPU may have been detached while suspended
++	status = shps_dgpu_is_present(pdev);
++	if (status < 0) {
++		dev_err(&pdev->dev, "failed to get dGPU presence: %d\n", status);
++		return;
++	} else if (status == 0) {
++		clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++	}
++
++	/*
++	 * During resume, the PCIe core will power on the root-port, which in turn
++	 * will power on the dGPU. Most of the state synchronization is already
++	 * handled via the SAN RQSG handler, so it is in a fully consistent
++	 * on-state here. If requested, turn it off here.
++	 *
++	 * As there seem to be some synchronization issues turning off the dGPU
++	 * directly after the power-on SAN RQSG notification during the resume
++	 * process, let's do this here.
++	 *
++	 * TODO/FIXME:
++	 *   This does not combat unhandled power-ons when the device is not fully
++	 *   resumed, i.e. re-suspended before shps_pm_complete is called. Those
++	 *   should normally not be an issue, but the dGPU does get hot even though
++	 *   it is suspended, so ideally we want to keep it off.
++	 */
++	if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
++		status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
++		if (status)
++			dev_err(&pdev->dev, "failed to power-off dGPU: %d\n", status);
++	}
++
++	dbg_dump_drvsta(pdev, "shps_pm_complete.2");
++}
++
++static int shps_pm_suspend(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	if (device_may_wakeup(dev)) {
++		status = enable_irq_wake(drvdata->irq_base_presence);
++		if (status)
++			return status;
++
++		set_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state);
++	}
++
++	return 0;
++}
++
++static int shps_pm_resume(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status = 0;
++
++	if (test_and_clear_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state))
++		status = disable_irq_wake(drvdata->irq_base_presence);
++
++	return status;
++}
++
++static void shps_shutdown(struct platform_device *pdev)
++{
++	int status;
++
++	/*
++	 * Turn on dGPU before shutting down. This allows the core drivers to
++	 * properly shut down the device. If we don't do this, the pcieport driver
++	 * will complain that the device has already been disabled.
++	 */
++	status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_ON);
++	if (status)
++		dev_err(&pdev->dev, "failed to turn on dGPU: %d\n", status);
++}
++
++static int shps_dgpu_detached(struct platform_device *pdev)
++{
++	dbg_dump_power_states(pdev, "shps_dgpu_detached");
++	return shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
++}
++
++static int shps_dgpu_attached(struct platform_device *pdev)
++{
++	dbg_dump_power_states(pdev, "shps_dgpu_attached");
++	return 0;
++}
++
++static int shps_dgpu_powered_on(struct platform_device *pdev)
++{
++	/*
++	 * This function gets called directly after a power-state transition of
++	 * the dGPU root port out of D3cold state, indicating a power-on of the
++	 * dGPU. Specifically, this function is called from the RQSG handler of
++	 * SAN, invoked by the ACPI _ON method of the dGPU root port. This means
++	 * that this function is run inside `pci_set_power_state(rp, ...)`
++	 * synchronously and thus returns before the `pci_set_power_state` call
++	 * does.
++	 *
++	 * `pci_set_power_state` may either be called by us or when the PCI
++	 * subsystem decides to power up the root port (e.g. during resume). Thus
++	 * we should use this function to ensure that the dGPU and root port
++	 * states are consistent when an unexpected power-up is encountered.
++	 */
++
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct pci_dev *rp = drvdata->dgpu_root_port;
++	int status;
++
++	dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.1");
++
++	// if we caused the root port to power-on, return
++	if (test_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state))
++		return 0;
++
++	// if dGPU is not present, force power-target to off and return
++	status = shps_dgpu_is_present(pdev);
++	if (status == 0)
++		clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++	if (status <= 0)
++		return status;
++
++	mutex_lock(&drvdata->lock);
++
++	dbg_dump_power_states(pdev, "shps_dgpu_powered_on.1");
++	dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.1");
++	if (drvdata->dgpu_root_port_state)
++		pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
++	pci_restore_state(rp);
++	if (!pci_is_enabled(rp))
++		pci_enable_device(rp);
++	pci_set_master(rp);
++	dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.2");
++	dbg_dump_power_states(pdev, "shps_dgpu_powered_on.2");
++	dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.2");
++
++	mutex_unlock(&drvdata->lock);
++
++	if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
++		dev_warn(&pdev->dev, "unexpected dGPU power-on detected\n");
++		// TODO: schedule state re-check and update
++	}
++
++	return 0;
++}
++
++static int shps_dgpu_handle_rqsg(struct notifier_block *nb, unsigned long action, void *data)
++{
++	struct shps_driver_data *drvdata = container_of(nb, struct shps_driver_data, dgpu_nf);
++	struct platform_device *pdev = drvdata->pdev;
++	struct san_dgpu_event *evt = data;
++
++	if (evt->category == SAM_DGPU_TC && evt->command == SAM_DGPU_CID_POWERON)
++		return shps_dgpu_powered_on(pdev);
++
++	dev_warn(&pdev->dev, "unimplemented dGPU request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
++		 evt->category, evt->command, evt->instance);
++	return 0;
++}
++
++static irqreturn_t shps_dgpu_presence_irq(int irq, void *data)
++{
++	struct platform_device *pdev = data;
++	bool dgpu_present;
++	int status;
++
++	status = shps_dgpu_is_present(pdev);
++	if (status < 0) {
++		dev_err(&pdev->dev, "failed to check physical dGPU presence: %d\n", status);
++		return IRQ_HANDLED;
++	}
++
++	dgpu_present = status != 0;
++	dev_info(&pdev->dev, "dGPU physically %s\n", dgpu_present ? "attached" : "detached");
++
++	if (dgpu_present)
++		status = shps_dgpu_attached(pdev);
++	else
++		status = shps_dgpu_detached(pdev);
++
++	if (status)
++		dev_err(&pdev->dev, "error handling dGPU interrupt: %d\n", status);
++
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t shps_base_presence_irq(int irq, void *data)
++{
++	return IRQ_HANDLED;	// nothing to do, just wake
++}
++
++
++static int shps_gpios_setup(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct gpio_desc *gpio_dgpu_power;
++	struct gpio_desc *gpio_dgpu_presence;
++	struct gpio_desc *gpio_base_presence;
++	int status;
++
++	// get GPIOs
++	gpio_dgpu_power = devm_gpiod_get(&pdev->dev, "dgpu_power", GPIOD_IN);
++	if (IS_ERR(gpio_dgpu_power)) {
++		status = PTR_ERR(gpio_dgpu_power);
++		goto err_out;
++	}
++
++	gpio_dgpu_presence = devm_gpiod_get(&pdev->dev, "dgpu_presence", GPIOD_IN);
++	if (IS_ERR(gpio_dgpu_presence)) {
++		status = PTR_ERR(gpio_dgpu_presence);
++		goto err_out;
++	}
++
++	gpio_base_presence = devm_gpiod_get(&pdev->dev, "base_presence", GPIOD_IN);
++	if (IS_ERR(gpio_base_presence)) {
++		status = PTR_ERR(gpio_base_presence);
++		goto err_out;
++	}
++
++	// export GPIOs
++	status = gpiod_export(gpio_dgpu_power, false);
++	if (status)
++		goto err_out;
++
++	status = gpiod_export(gpio_dgpu_presence, false);
++	if (status)
++		goto err_export_dgpu_presence;
++
++	status = gpiod_export(gpio_base_presence, false);
++	if (status)
++		goto err_export_base_presence;
++
++	// create sysfs links
++	status = gpiod_export_link(&pdev->dev, "gpio-dgpu_power", gpio_dgpu_power);
++	if (status)
++		goto err_link_dgpu_power;
++
++	status = gpiod_export_link(&pdev->dev, "gpio-dgpu_presence", gpio_dgpu_presence);
++	if (status)
++		goto err_link_dgpu_presence;
++
++	status = gpiod_export_link(&pdev->dev, "gpio-base_presence", gpio_base_presence);
++	if (status)
++		goto err_link_base_presence;
++
++	drvdata->gpio_dgpu_power = gpio_dgpu_power;
++	drvdata->gpio_dgpu_presence = gpio_dgpu_presence;
++	drvdata->gpio_base_presence = gpio_base_presence;
++	return 0;
++
++err_link_base_presence:
++	sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence");
++err_link_dgpu_presence:
++	sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power");
++err_link_dgpu_power:
++	gpiod_unexport(gpio_base_presence);
++err_export_base_presence:
++	gpiod_unexport(gpio_dgpu_presence);
++err_export_dgpu_presence:
++	gpiod_unexport(gpio_dgpu_power);
++err_out:
++	return status;
++}
++
++static void shps_gpios_remove(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++
++	sysfs_remove_link(&pdev->dev.kobj, "gpio-base_presence");
++	sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence");
++	sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power");
++	gpiod_unexport(drvdata->gpio_base_presence);
++	gpiod_unexport(drvdata->gpio_dgpu_presence);
++	gpiod_unexport(drvdata->gpio_dgpu_power);
++}
++
++static int shps_gpios_setup_irq(struct platform_device *pdev)
++{
++	const int irqf_dgpu = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
++	const int irqf_base = IRQF_SHARED;
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	status = gpiod_to_irq(drvdata->gpio_base_presence);
++	if (status < 0)
++		return status;
++	drvdata->irq_base_presence = status;
++
++	status = gpiod_to_irq(drvdata->gpio_dgpu_presence);
++	if (status < 0)
++		return status;
++	drvdata->irq_dgpu_presence = status;
++
++	status = request_irq(drvdata->irq_base_presence,
++			     shps_base_presence_irq, irqf_base,
++			     "shps_base_presence_irq", pdev);
++	if (status) {
++		dev_err(&pdev->dev, "base irq failed: %d\n", status);
++		return status;
++	}
++
++	status = request_threaded_irq(drvdata->irq_dgpu_presence,
++				      NULL, shps_dgpu_presence_irq, irqf_dgpu,
++				      "shps_dgpu_presence_irq", pdev);
++	if (status) {
++		free_irq(drvdata->irq_base_presence, pdev);
++		return status;
++	}
++
++	return 0;
++}
++
++static void shps_gpios_remove_irq(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++
++	free_irq(drvdata->irq_base_presence, pdev);
++	free_irq(drvdata->irq_dgpu_presence, pdev);
++}
++
++static void shps_sgcp_notify(acpi_handle device, u32 value, void *context) {
++	struct platform_device *pdev = context;
++	switch (value) {
++		case ACPI_SGCP_NOTIFY_POWER_ON:
++			shps_dgpu_powered_on(pdev);
++	}
++}
++
++static int shps_start_sgcp_notification(struct platform_device *pdev, acpi_handle *sgpc_handle) {
++	acpi_handle handle;
++	acpi_status status;
++
++	status = acpi_get_handle(NULL, "\\_SB.SGPC", &handle);
++	if (ACPI_FAILURE(status)) {
++		dev_err(&pdev->dev, "error in get_handle %x\n", status);
++		return -ENXIO;
++	}
++
++	status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify, pdev);
++	if (ACPI_FAILURE(status)) {
++		dev_err(&pdev->dev, "error in install notify %x\n", status);
++		*sgpc_handle = NULL;
++		return -EFAULT;
++	}
++
++	*sgpc_handle = handle;
++	return 0;
++}
++
++static void shps_remove_sgcp_notification(struct platform_device *pdev) {
++	acpi_status status;
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++
++	if (drvdata->sgpc_handle) {
++		status = acpi_remove_notify_handler(drvdata->sgpc_handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify);
++		if (ACPI_FAILURE(status))
++			dev_err(&pdev->dev, "failed to remove notify handler: %x\n", status);
++	}
++}
++
++static struct shps_hardware_traits shps_detect_hardware_traits(struct platform_device *pdev) {
++	const struct shps_hardware_probe *p;
++
++	for (p = shps_hardware_probe_match; p->hardware_id; ++p) {
++		if (acpi_dev_present(p->hardware_id, NULL, -1)) {
++			break;
++		}
++	}
++
++	dev_info(&pdev->dev,
++		"shps_detect_hardware_traits found device %s, generation %d\n",
++		p->hardware_id ? p->hardware_id : "SAN (default)",
++		p->generation);
++
++	return *p->hardware_traits;
++}
++
++static int shps_probe(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata;
++	struct ssam_controller *ctrl;
++	struct device_link *link;
++	int power, status;
++	struct shps_hardware_traits detected_traits;
++
++	if (gpiod_count(&pdev->dev, NULL) < 0) {
++		dev_err(&pdev->dev, "gpiod_count returned < 0\n");
++		return -ENODEV;
++	}
++
++	// link to SSH
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status) {
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++	}
++
++	// detect what kind of hardware we're running
++	detected_traits = shps_detect_hardware_traits(pdev);
++
++	if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
++		// link to SAN
++		status = san_client_link(&pdev->dev);
++		if (status) {
++			dev_err(&pdev->dev, "failed to register as SAN client: %d\n", status);
++			return status == -ENXIO ? -EPROBE_DEFER : status;
++		}
++	}
++
++	status = devm_acpi_dev_add_driver_gpios(&pdev->dev, shps_acpi_gpios);
++	if (status) {
++		dev_err(&pdev->dev, "failed to add gpios: %d\n", status);
++		return status;
++	}
++
++	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
++	if (!drvdata)
++		return -ENOMEM;
++
++	mutex_init(&drvdata->lock);
++	platform_set_drvdata(pdev, drvdata);
++
++	drvdata->ctrl = ctrl;
++	drvdata->pdev = pdev;
++	drvdata->hardware_traits = detected_traits;
++
++	drvdata->dgpu_root_port = shps_dgpu_dsm_get_pci_dev(pdev);
++	if (IS_ERR(drvdata->dgpu_root_port)) {
++		status = PTR_ERR(drvdata->dgpu_root_port);
++		dev_err(&pdev->dev, "failed to get pci dev: %d\n", status);
++		return status;
++	}
++
++	status = shps_gpios_setup(pdev);
++	if (status) {
++		dev_err(&pdev->dev, "unable to set up gpios, %d\n", status);
++		goto err_gpio;
++	}
++
++	status = shps_gpios_setup_irq(pdev);
++	if (status) {
++		dev_err(&pdev->dev, "unable to set up irqs %d\n", status);
++		goto err_gpio_irqs;
++	}
++
++	status = device_add_groups(&pdev->dev, shps_power_groups);
++	if (status)
++		goto err_devattr;
++
++	link = device_link_add(&pdev->dev, &drvdata->dgpu_root_port->dev,
++			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
++	if (!link) {	/* must not return 0 after teardown */
++		status = -ENODEV; goto err_devlink; }
++
++	if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
++		drvdata->dgpu_nf.priority = 1;
++		drvdata->dgpu_nf.notifier_call = shps_dgpu_handle_rqsg;
++
++		status = san_dgpu_notifier_register(&drvdata->dgpu_nf);
++		if (status) {
++			dev_err(&pdev->dev, "unable to register SAN notification handler (%d)\n", status);
++			goto err_devlink;
++		}
++	} else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
++		status = shps_start_sgcp_notification(pdev, &drvdata->sgpc_handle);
++		if (status) {
++			dev_err(&pdev->dev, "unable to install SGCP notification handler (%d)\n", status);
++			goto err_devlink;
++		}
++	}
++
++	// if dGPU is not present turn-off root-port, else obey module param
++	status = shps_dgpu_is_present(pdev);
++	if (status < 0)
++		goto err_post_notification;
++
++	power = status == 0 ? SHPS_DGPU_POWER_OFF : param_dgpu_power_init;
++	if (power != SHPS_DGPU_MP_POWER_ASIS) {
++		status = shps_dgpu_set_power(pdev, power);
++		if (status)
++			goto err_post_notification;
++	}
++
++	// initialize power target
++	status = shps_dgpu_rp_get_power(pdev);
++	if (status < 0)
++		goto err_pwrtgt;
++
++	if (status)
++		set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++	else
++		clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++
++	device_init_wakeup(&pdev->dev, true);
++	return 0;
++
++err_pwrtgt:
++	if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) {
++		int ps = shps_dgpu_set_power(pdev, param_dgpu_power_exit);
++		if (ps)
++			dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", ps);
++	}
++err_post_notification:
++	if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
++		shps_remove_sgcp_notification(pdev);
++	} else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
++		san_dgpu_notifier_unregister(&drvdata->dgpu_nf);
++	}
++err_devlink:
++	device_remove_groups(&pdev->dev, shps_power_groups);
++err_devattr:
++	shps_gpios_remove_irq(pdev);
++err_gpio_irqs:
++	shps_gpios_remove(pdev);
++err_gpio:
++	pci_dev_put(drvdata->dgpu_root_port);
++	return status;
++}
++
++static int shps_remove(struct platform_device *pdev)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	int status;
++
++	if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) {
++		status = shps_dgpu_set_power(pdev, param_dgpu_power_exit);
++		if (status)
++			dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status);
++	}
++
++	device_set_wakeup_capable(&pdev->dev, false);
++
++	if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
++		shps_remove_sgcp_notification(pdev);
++	} else if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
++		san_dgpu_notifier_unregister(&drvdata->dgpu_nf);
++	}
++	device_remove_groups(&pdev->dev, shps_power_groups);
++	shps_gpios_remove_irq(pdev);
++	shps_gpios_remove(pdev);
++	pci_dev_put(drvdata->dgpu_root_port);
++
++	return 0;
++}
++
++
++static const struct dev_pm_ops shps_pm_ops = {
++	.prepare = shps_pm_prepare,
++	.complete = shps_pm_complete,
++	.suspend = shps_pm_suspend,
++	.resume = shps_pm_resume,
++};
++
++static const struct acpi_device_id shps_acpi_match[] = {
++	{ "MSHW0153", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, shps_acpi_match);
++
++static struct platform_driver surface_sam_hps = {
++	.probe = shps_probe,
++	.remove = shps_remove,
++	.shutdown = shps_shutdown,
++	.driver = {
++		.name = "surface_dgpu_hotplug",
++		.acpi_match_table = shps_acpi_match,
++		.pm = &shps_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(surface_sam_hps);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("DGPU hot-plug system driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/clients/surface_perfmode.c b/drivers/misc/surface_aggregator/clients/surface_perfmode.c
+new file mode 100644
+index 000000000000..006601b3bea6
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/clients/surface_perfmode.c
+@@ -0,0 +1,122 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface performance-mode driver.
++ *
++ * Provides a user-space interface for the performance mode control provided by
++ * the Surface System Aggregator Module (SSAM), influencing cooling behavior
++ * of the device and potentially managing power limits.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sysfs.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/device.h>
++
++enum sam_perf_mode {
++	SAM_PERF_MODE_NORMAL  = 1,
++	SAM_PERF_MODE_BATTERY = 2,
++	SAM_PERF_MODE_PERF1   = 3,
++	SAM_PERF_MODE_PERF2   = 4,
++
++	__SAM_PERF_MODE__MIN  = 1,
++	__SAM_PERF_MODE__MAX  = 4,
++};
++
++struct ssam_perf_info {
++	__le32 mode;
++	__le16 unknown1;
++	__le16 unknown2;
++} __packed;
++
++static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_tmp_perf_mode_get, struct ssam_perf_info, {
++	.target_category = SSAM_SSH_TC_TMP,
++	.command_id      = 0x02,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_perf_mode_set, __le32, {
++	.target_category = SSAM_SSH_TC_TMP,
++	.command_id      = 0x03,
++});
++
++static int ssam_tmp_perf_mode_set(struct ssam_device *sdev, u32 mode)
++{
++	__le32 mode_le = cpu_to_le32(mode);
++
++	if (mode < __SAM_PERF_MODE__MIN || mode > __SAM_PERF_MODE__MAX)
++		return -EINVAL;
++
++	return __ssam_tmp_perf_mode_set(sdev, &mode_le);
++}
++
++static ssize_t perf_mode_show(struct device *dev, struct device_attribute *attr,
++			      char *data)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++	struct ssam_perf_info info;
++	int status;
++
++	status = ssam_tmp_perf_mode_get(sdev, &info);
++	if (status) {
++		dev_err(dev, "failed to get current performance mode: %d\n",
++			status);
++		return -EIO;
++	}
++
++	return sprintf(data, "%d\n", le32_to_cpu(info.mode));
++}
++
++static ssize_t perf_mode_store(struct device *dev, struct device_attribute *attr,
++			       const char *data, size_t count)
++{
++	struct ssam_device *sdev = to_ssam_device(dev);
++	int perf_mode;
++	int status;
++
++	status = kstrtoint(data, 0, &perf_mode);
++	if (status < 0)
++		return status;
++
++	status = ssam_tmp_perf_mode_set(sdev, perf_mode);
++	if (status < 0)
++		return status;
++
++	return count;
++}
++
++static const DEVICE_ATTR_RW(perf_mode);
++
++static int surface_sam_sid_perfmode_probe(struct ssam_device *sdev)
++{
++	return sysfs_create_file(&sdev->dev.kobj, &dev_attr_perf_mode.attr);
++}
++
++static void surface_sam_sid_perfmode_remove(struct ssam_device *sdev)
++{
++	sysfs_remove_file(&sdev->dev.kobj, &dev_attr_perf_mode.attr);
++}
++
++static const struct ssam_device_id ssam_perfmode_match[] = {
++	{ SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },
++	{ },
++};
++MODULE_DEVICE_TABLE(ssam, ssam_perfmode_match);
++
++static struct ssam_device_driver surface_sam_sid_perfmode = {
++	.probe = surface_sam_sid_perfmode_probe,
++	.remove = surface_sam_sid_perfmode_remove,
++	.match_table = ssam_perfmode_match,
++	.driver = {
++		.name = "surface_performance_mode",
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_ssam_device_driver(surface_sam_sid_perfmode);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Performance mode interface for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/controller.c b/drivers/misc/surface_aggregator/controller.c
+new file mode 100644
+index 000000000000..c5d19feb4d38
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/controller.c
+@@ -0,0 +1,2555 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Main SSAM/SSH controller structure and functionality.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/atomic.h>
++#include <linux/completion.h>
++#include <linux/gpio/consumer.h>
++#include <linux/interrupt.h>
++#include <linux/kref.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/rbtree.h>
++#include <linux/rwsem.h>
++#include <linux/serdev.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/srcu.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/serial_hub.h>
++
++#include "controller.h"
++#include "ssh_msgb.h"
++#include "ssh_request_layer.h"
++
++#include "trace.h"
++
++
++/* -- Safe counters. -------------------------------------------------------- */
++
++/**
++ * ssh_seq_reset() - Reset/initialize sequence ID counter.
++ * @c: The counter to reset.
++ */
++static void ssh_seq_reset(struct ssh_seq_counter *c)
++{
++	WRITE_ONCE(c->value, 0);
++}
++
++/**
++ * ssh_seq_next() - Get next sequence ID.
++ * @c: The counter providing the sequence IDs.
++ *
++ * Return: Returns the next sequence ID of the counter.
++ */
++static u8 ssh_seq_next(struct ssh_seq_counter *c)
++{
++	u8 old = READ_ONCE(c->value);
++	u8 new = old + 1;
++	u8 ret;
++
++	while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
++		old = ret;
++		new = old + 1;
++	}
++
++	return old;
++}
++
++/**
++ * ssh_rqid_reset() - Reset/initialize request ID counter.
++ * @c: The counter to reset.
++ */
++static void ssh_rqid_reset(struct ssh_rqid_counter *c)
++{
++	WRITE_ONCE(c->value, 0);
++}
++
++/**
++ * ssh_rqid_next() - Get next request ID.
++ * @c: The counter providing the request IDs.
++ *
++ * Return: Returns the next request ID of the counter, skipping any reserved
++ * request IDs.
++ */
++static u16 ssh_rqid_next(struct ssh_rqid_counter *c)
++{
++	u16 old = READ_ONCE(c->value);
++	u16 new = ssh_rqid_next_valid(old);
++	u16 ret;
++
++	while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
++		old = ret;
++		new = ssh_rqid_next_valid(old);
++	}
++
++	return old;
++}
++
++
++/* -- Event notifier/callbacks. --------------------------------------------- */
++/*
++ * The notifier system is based on linux/notifier.h, specifically the SRCU
++ * implementation. The difference to that is, that some bits of the notifier
++ * call return value can be tracked across multiple calls. This is done so that
++ * handling of events can be tracked and a warning can be issued in case an
++ * event goes unhandled. The idea of that warning is that it should help discover
++ * and identify new/currently unimplemented features.
++ */
++
++
++/**
++ * ssam_event_matches_notifier() - Test if an event matches a notifier.
++ * @notif: The event notifier to test against.
++ * @event: The event to test.
++ *
++ * Return: Returns %true iff the given event matches the given notifier
++ * according to the rules set in the notifier's event mask, %false otherwise.
++ */
++static bool ssam_event_matches_notifier(
++		const struct ssam_event_notifier *notif,
++		const struct ssam_event *event)
++{
++	bool match = notif->event.id.target_category == event->target_category;
++
++	if (notif->event.mask & SSAM_EVENT_MASK_TARGET)
++		match &= notif->event.reg.target_id == event->target_id;
++
++	if (notif->event.mask & SSAM_EVENT_MASK_INSTANCE)
++		match &= notif->event.id.instance == event->instance_id;
++
++	return match;
++}
++
++/**
++ * ssam_nfblk_call_chain() - Call event notifier callbacks of the given chain.
++ * @nh:    The notifier head for which the notifier callbacks should be called.
++ * @event: The event data provided to the callbacks.
++ *
++ * Call all registered notifier callbacks in order of their priority until
++ * either no notifier is left or a notifier returns a value with the
++ * %SSAM_NOTIF_STOP bit set. Note that this bit is automatically set via
++ * ssam_notifier_from_errno() on any non-zero error value.
++ *
++ * Return: Returns the notifier status value, which contains the notifier
++ * status bits (%SSAM_NOTIF_HANDLED and %SSAM_NOTIF_STOP) as well as a
++ * potential error value returned from the last executed notifier callback.
++ * Use ssam_notifier_to_errno() to convert this value to the original error
++ * value.
++ */
++static int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
++{
++	struct ssam_notifier_block *nb, *next_nb;
++	struct ssam_event_notifier *nf;
++	int ret = 0, idx;
++
++	idx = srcu_read_lock(&nh->srcu);
++
++	nb = rcu_dereference_raw(nh->head);
++	while (nb) {
++		nf = container_of(nb, struct ssam_event_notifier, base);
++		next_nb = rcu_dereference_raw(nb->next);
++
++		if (ssam_event_matches_notifier(nf, event)) {
++			ret = (ret & SSAM_NOTIF_STATE_MASK) | nb->fn(nf, event);
++			if (ret & SSAM_NOTIF_STOP)
++				break;
++		}
++
++		nb = next_nb;
++	}
++
++	srcu_read_unlock(&nh->srcu, idx);
++	return ret;
++}
++
++/**
++ * __ssam_nfblk_insert() - Insert a new notifier block into the given notifier
++ * list.
++ * @nh: The notifier head into which the block should be inserted.
++ * @nb: The notifier block to add.
++ *
++ * Note: This function must be synchronized by the caller with respect to other
++ * insert and/or remove calls.
++ *
++ * Return: Returns zero on success, %-EINVAL if the notifier block has already
++ * been registered.
++ */
++static int __ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
++{
++	struct ssam_notifier_block **link = &nh->head;
++
++	while ((*link) != NULL) {
++		if (unlikely((*link) == nb)) {
++			WARN(1, "double register detected");
++			return -EINVAL;
++		}
++
++		if (nb->priority > (*link)->priority)
++			break;
++
++		link = &((*link)->next);
++	}
++
++	nb->next = *link;
++	rcu_assign_pointer(*link, nb);
++
++	return 0;
++}
++
++/**
++ * __ssam_nfblk_find_link() - Find a notifier block link on the given list.
++ * @nh: The notifier head on which the search should be conducted.
++ * @nb: The notifier block to search for.
++ *
++ * Note: This function must be synchronized by the caller with respect to
++ * insert and/or remove calls.
++ *
++ * Return: Returns a pointer to the link (i.e. pointer pointing) to the given
++ * notifier block, from the previous node in the list, or %NULL if the given
++ * notifier block is not contained in the notifier list.
++ */
++static struct ssam_notifier_block **__ssam_nfblk_find_link(
++		struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
++{
++	struct ssam_notifier_block **link = &nh->head;
++
++	while ((*link) != NULL) {
++		if ((*link) == nb)
++			return link;
++
++		link = &((*link)->next);
++	}
++
++	return NULL;
++}
++
++/**
++ * __ssam_nfblk_erase() - Erase a notifier block link in the given notifier
++ * list.
++ * @link: The link to be erased.
++ *
++ * Note: This function must be synchronized by the caller with respect to
++ * other insert and/or remove/erase/find calls. The caller _must_ ensure SRCU
++ * synchronization by calling synchronize_srcu() with ``nh->srcu`` after
++ * leaving the critical section, to ensure that the removed notifier block is
++ * not in use any more.
++ */
++static void __ssam_nfblk_erase(struct ssam_notifier_block **link)
++{
++	rcu_assign_pointer(*link, (*link)->next);
++}
++
++
++/**
++ * __ssam_nfblk_remove() - Remove a notifier block from the given notifier list.
++ * @nh: The notifier head from which the block should be removed.
++ * @nb: The notifier block to remove.
++ *
++ * Note: This function must be synchronized by the caller with respect to
++ * other insert and/or remove calls. On success, the caller *must* ensure SRCU
++ * synchronization by calling synchronize_srcu() with ``nh->srcu`` after
++ * leaving the critical section, to ensure that the removed notifier block is
++ * not in use any more.
++ *
++ * Return: Returns zero on success, %-ENOENT if the specified notifier block
++ * could not be found on the notifier list.
++ */
++static int __ssam_nfblk_remove(struct ssam_nf_head *nh,
++			       struct ssam_notifier_block *nb)
++{
++	struct ssam_notifier_block **link;
++
++	link = __ssam_nfblk_find_link(nh, nb);
++	if (!link)
++		return -ENOENT;
++
++	__ssam_nfblk_erase(link);
++	return 0;
++}
++
++/**
++ * ssam_nf_head_init() - Initialize the given notifier head.
++ * @nh: The notifier head to initialize.
++ */
++static int ssam_nf_head_init(struct ssam_nf_head *nh)
++{
++	int status;
++
++	status = init_srcu_struct(&nh->srcu);
++	if (status)
++		return status;
++
++	nh->head = NULL;
++	return 0;
++}
++
++/**
++ * ssam_nf_head_destroy() - Deinitialize the given notifier head.
++ * @nh: The notifier head to deinitialize.
++ */
++static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
++{
++	cleanup_srcu_struct(&nh->srcu);
++}
++
++
++/* -- Event/notification registry. ------------------------------------------ */
++
++/**
++ * struct ssam_nf_refcount_key - Key used for event activation reference
++ * counting.
++ * @reg: The registry via which the event is enabled/disabled.
++ * @id:  The ID uniquely describing the event.
++ */
++struct ssam_nf_refcount_key {
++	struct ssam_event_registry reg;
++	struct ssam_event_id id;
++};
++
++/**
++ * struct ssam_nf_refcount_entry - RB-tree entry for reference counting event
++ * activations.
++ * @node:     The node of this entry in the rb-tree.
++ * @key:      The key of the event.
++ * @refcount: The reference-count of the event.
++ * @flags:    The flags used when enabling the event.
++ */
++struct ssam_nf_refcount_entry {
++	struct rb_node node;
++	struct ssam_nf_refcount_key key;
++	int refcount;
++	u8 flags;
++};
++
++
++/**
++ * ssam_nf_refcount_inc() - Increment reference-/activation-count of the given
++ * event.
++ * @nf:  The notifier system reference.
++ * @reg: The registry used to enable/disable the event.
++ * @id:  The event ID.
++ *
++ * Increments the reference-/activation-count associated with the specified
++ * event type/ID, allocating a new entry for this event ID if necessary. A
++ * newly allocated entry will have a refcount of one.
++ *
++ * Return: Returns the refcount entry on success. Returns ``ERR_PTR(-ENOSPC)``
++ * if there have already been %INT_MAX events of the specified ID and type
++ * registered, or ``ERR_PTR(-ENOMEM)`` if the entry could not be allocated.
++ */
++static struct ssam_nf_refcount_entry *ssam_nf_refcount_inc(
++		struct ssam_nf *nf, struct ssam_event_registry reg,
++		struct ssam_event_id id)
++{
++	struct ssam_nf_refcount_entry *entry;
++	struct ssam_nf_refcount_key key;
++	struct rb_node **link = &nf->refcount.rb_node;
++	struct rb_node *parent = NULL;
++	int cmp;
++
++	key.reg = reg;
++	key.id = id;
++
++	while (*link) {
++		entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
++		parent = *link;
++
++		cmp = memcmp(&key, &entry->key, sizeof(key));
++		if (cmp < 0) {
++			link = &(*link)->rb_left;
++		} else if (cmp > 0) {
++			link = &(*link)->rb_right;
++		} else if (entry->refcount < INT_MAX) {
++			entry->refcount++;
++			return entry;
++		} else {
++			return ERR_PTR(-ENOSPC);
++		}
++	}
++
++	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
++	if (!entry)
++		return ERR_PTR(-ENOMEM);
++
++	entry->key = key;
++	entry->refcount = 1;
++
++	rb_link_node(&entry->node, parent, link);
++	rb_insert_color(&entry->node, &nf->refcount);
++
++	return entry;
++}
++
++/**
++ * ssam_nf_refcount_dec() - Decrement reference-/activation-count of the given
++ * event.
++ * @nf:  The notifier system reference.
++ * @reg: The registry used to enable/disable the event.
++ * @id:  The event ID.
++ *
++ * Decrements the reference-/activation-count of the specified event,
++ * returning its entry. If the returned entry has a refcount of zero, the
++ * caller is responsible for freeing it using kfree().
++ *
++ * Return: Returns the refcount entry on success or %NULL if the entry has not
++ * been found.
++ */
++static struct ssam_nf_refcount_entry *ssam_nf_refcount_dec(
++		struct ssam_nf *nf, struct ssam_event_registry reg,
++		struct ssam_event_id id)
++{
++	struct ssam_nf_refcount_entry *entry;
++	struct ssam_nf_refcount_key key;
++	struct rb_node *node = nf->refcount.rb_node;
++	int cmp;
++
++	key.reg = reg;
++	key.id = id;
++
++	while (node) {
++		entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
++
++		cmp = memcmp(&key, &entry->key, sizeof(key));
++		if (cmp < 0) {
++			node = node->rb_left;
++		} else if (cmp > 0) {
++			node = node->rb_right;
++		} else {
++			entry->refcount--;
++			if (entry->refcount == 0)
++				rb_erase(&entry->node, &nf->refcount);
++
++			return entry;
++		}
++	}
++
++	return NULL;
++}
++
++/**
++ * ssam_nf_refcount_empty() - Test if the notification system has any
++ * enabled/active events.
++ * @nf: The notification system.
++ */
++static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
++{
++	return RB_EMPTY_ROOT(&nf->refcount);
++}
++
++/**
++ * ssam_nf_call() - Call notification callbacks for the provided event.
++ * @nf:    The notifier system
++ * @dev:   The associated device, only used for logging.
++ * @rqid:  The request ID of the event.
++ * @event: The event provided to the callbacks.
++ *
++ * Execute registered callbacks in order of their priority until either no
++ * callback is left or a callback returned a value with the %SSAM_NOTIF_STOP
++ * bit set. Note that this bit is set automatically when converting non-zero
++ * error values via ssam_notifier_from_errno() to notifier values.
++ *
++ * Also note that any callback that could handle an event should return a value
++ * with bit %SSAM_NOTIF_HANDLED set, indicating that the event does not go
++ * unhandled/ignored. In case no registered callback could handle an event,
++ * this function will emit a warning.
++ *
++ * In case a callback failed, this function will emit an error message.
++ */
++static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
++			 struct ssam_event *event)
++{
++	struct ssam_nf_head *nf_head;
++	int status, nf_ret;
++
++	if (!ssh_rqid_is_event(rqid)) {
++		dev_warn(dev, "event: unsupported rqid: 0x%04x\n", rqid);
++		return;
++	}
++
++	nf_head = &nf->head[ssh_rqid_to_event(rqid)];
++	nf_ret = ssam_nfblk_call_chain(nf_head, event);
++	status = ssam_notifier_to_errno(nf_ret);
++
++	if (status < 0) {
++		dev_err(dev, "event: error handling event: %d "
++			"(tc: 0x%02x, tid: 0x%02x, cid: 0x%02x, iid: 0x%02x)\n",
++			status, event->target_category, event->target_id,
++			event->command_id, event->instance_id);
++	}
++
++	if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
++		dev_warn(dev, "event: unhandled event (rqid: 0x%02x, "
++			 "tc: 0x%02x, tid: 0x%02x, cid: 0x%02x, iid: 0x%02x)\n",
++			 rqid, event->target_category, event->target_id,
++			 event->command_id, event->instance_id);
++	}
++}
++
++/**
++ * ssam_nf_init() - Initialize the notifier system.
++ * @nf: The notifier system to initialize.
++ */
++static int ssam_nf_init(struct ssam_nf *nf)
++{
++	int i, status;
++
++	for (i = 0; i < SSH_NUM_EVENTS; i++) {
++		status = ssam_nf_head_init(&nf->head[i]);
++		if (status)
++			break;
++	}
++
++	if (status) {
++		for (i = i - 1; i >= 0; i--)
++			ssam_nf_head_destroy(&nf->head[i]);
++
++		return status;
++	}
++
++	mutex_init(&nf->lock);
++	return 0;
++}
++
++/**
++ * ssam_nf_destroy() - Deinitialize the notifier system.
++ * @nf: The notifier system to deinitialize.
++ */
++static void ssam_nf_destroy(struct ssam_nf *nf)
++{
++	int i;
++
++	for (i = 0; i < SSH_NUM_EVENTS; i++)
++		ssam_nf_head_destroy(&nf->head[i]);
++
++	mutex_destroy(&nf->lock);
++}
++
++
++/* -- Event/async request completion system. -------------------------------- */
++
++#define SSAM_CPLT_WQ_NAME	"ssam_cpltq"
++
++/*
++ * SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN - Maximum payload length for a cached
++ * &struct ssam_event_item.
++ *
++ * This length has been chosen to accommodate standard touchpad and
++ * keyboard input events. Events with larger payloads will be allocated
++ * separately.
++ */
++#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN	32
++
++static struct kmem_cache *ssam_event_item_cache;
++
++/**
++ * ssam_event_item_cache_init() - Initialize the event item cache.
++ */
++int ssam_event_item_cache_init(void)
++{
++	const unsigned int size = sizeof(struct ssam_event_item)
++				  + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
++	const unsigned int align = __alignof__(struct ssam_event_item);
++	struct kmem_cache *cache;
++
++	cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
++	if (!cache)
++		return -ENOMEM;
++
++	ssam_event_item_cache = cache;
++	return 0;
++}
++
++/**
++ * ssam_event_item_cache_destroy() - Deinitialize the event item cache.
++ */
++void ssam_event_item_cache_destroy(void)
++{
++	kmem_cache_destroy(ssam_event_item_cache);
++	ssam_event_item_cache = NULL;
++}
++
++static void __ssam_event_item_free_cached(struct ssam_event_item *item)
++{
++	kmem_cache_free(ssam_event_item_cache, item);
++}
++
++static void __ssam_event_item_free_generic(struct ssam_event_item *item)
++{
++	kfree(item);
++}
++
++/**
++ * ssam_event_item_free() - Free the provided event item.
++ * @item: The event item to free.
++ */
++static void ssam_event_item_free(struct ssam_event_item *item)
++{
++	trace_ssam_event_item_free(item);
++	item->ops.free(item);
++}
++
++/**
++ * ssam_event_item_alloc() - Allocate an event item with the given payload size.
++ * @len:   The event payload length.
++ * @flags: The flags used for allocation.
++ *
++ * Allocate an event item with the given payload size, preferring allocation
++ * from the event item cache if the payload is small enough (i.e. smaller than
++ * %SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN). Sets the item operations and payload
++ * length values. The item free callback (``ops.free``) should not be
++ * overwritten after this call.
++ *
++ * Return: Returns the newly allocated event item.
++ */
++static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
++{
++	struct ssam_event_item *item;
++
++	if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
++		item = kmem_cache_alloc(ssam_event_item_cache, GFP_KERNEL);
++		if (!item)
++			return NULL;
++
++		item->ops.free = __ssam_event_item_free_cached;
++	} else {
++		item = kzalloc(sizeof(*item) + len, GFP_KERNEL);
++		if (!item)
++			return NULL;
++
++		item->ops.free = __ssam_event_item_free_generic;
++	}
++
++	item->event.length = len;
++
++	trace_ssam_event_item_alloc(item, len);
++	return item;
++}
++
++
++/**
++ * ssam_event_queue_push() - Push an event item to the event queue.
++ * @q:    The event queue.
++ * @item: The item to add.
++ */
++static void ssam_event_queue_push(struct ssam_event_queue *q,
++				  struct ssam_event_item *item)
++{
++	spin_lock(&q->lock);
++	list_add_tail(&item->node, &q->head);
++	spin_unlock(&q->lock);
++}
++
++/**
++ * ssam_event_queue_pop() - Pop the next event item from the event queue.
++ * @q: The event queue.
++ *
++ * Returns and removes the next event item from the queue. Returns %NULL If
++ * there is no event item left.
++ */
++static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
++{
++	struct ssam_event_item *item;
++
++	spin_lock(&q->lock);
++	item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
++	if (item)
++		list_del(&item->node);
++	spin_unlock(&q->lock);
++
++	return item;
++}
++
++/**
++ * ssam_event_queue_is_empty() - Check if the event queue is empty.
++ * @q: The event queue.
++ */
++static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
++{
++	bool empty;
++
++	spin_lock(&q->lock);
++	empty = list_empty(&q->head);
++	spin_unlock(&q->lock);
++
++	return empty;
++}
++
++/**
++ * ssam_cplt_get_event_queue() - Get the event queue for the given parameters.
++ * @cplt: The completion system on which to look for the queue.
++ * @tid:  The target ID of the queue.
++ * @rqid: The request ID representing the event ID for which to get the queue.
++ *
++ * Return: Returns the event queue corresponding to the event type described
++ * by the given parameters. If the request ID does not represent an event,
++ * this function returns %NULL. If the target ID is not supported, this
++ * function will fall back to the default target ID (``tid = 1``).
++ */
++static struct ssam_event_queue *ssam_cplt_get_event_queue(
++		struct ssam_cplt *cplt, u8 tid, u16 rqid)
++{
++	u16 event = ssh_rqid_to_event(rqid);
++	u16 tidx = ssh_tid_to_index(tid);
++
++	if (!ssh_rqid_is_event(rqid)) {
++		dev_err(cplt->dev, "event: unsupported request ID: 0x%04x\n", rqid);
++		return NULL;
++	}
++
++	if (!ssh_tid_is_valid(tid)) {
++		dev_warn(cplt->dev, "event: unsupported target ID: %u\n", tid);
++		tidx = 0;
++	}
++
++	return &cplt->event.target[tidx].queue[event];
++}
++
++/**
++ * ssam_cplt_submit() - Submit a work item to the completion system workqueue.
++ * @cplt: The completion system.
++ * @work: The work item to submit.
++ */
++static bool ssam_cplt_submit(struct ssam_cplt *cplt, struct work_struct *work)
++{
++	return queue_work(cplt->wq, work);
++}
++
++/**
++ * ssam_cplt_submit_event() - Submit an event to the completion system.
++ * @cplt: The completion system.
++ * @item: The event item to submit.
++ *
++ * Submits the event to the completion system by queuing it on the event item
++ * queue and queuing the respective event queue work item on the completion
++ * workqueue, which will eventually complete the event.
++ *
++ * Return: Returns zero on success, %-EINVAL if there is no event queue that
++ * can handle the given event item.
++ */
++static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
++				  struct ssam_event_item *item)
++{
++	struct ssam_event_queue *evq;
++
++	evq = ssam_cplt_get_event_queue(cplt, item->event.target_id, item->rqid);
++	if (!evq)
++		return -EINVAL;
++
++	ssam_event_queue_push(evq, item);
++	ssam_cplt_submit(cplt, &evq->work);
++	return 0;
++}
++
++/**
++ * ssam_cplt_flush() - Flush the completion system.
++ * @cplt: The completion system.
++ *
++ * Flush the completion system by waiting until all currently submitted work
++ * items have been completed.
++ *
++ * Note: This function does not guarantee that all events will have been
++ * handled once this call terminates. In case of a larger number of
++ * to-be-completed events, the event queue work function may re-schedule its
++ * work item, which this flush operation will ignore.
++ *
++ * This operation is only intended to, during normal operation prior to
++ * shutdown, try to complete most events and requests to get them out of the
++ * system while the system is still fully operational. It does not aim to
++ * provide any guarantee that all of them have been handled.
++ */
++static void ssam_cplt_flush(struct ssam_cplt *cplt)
++{
++	flush_workqueue(cplt->wq);
++}
++
++static void ssam_event_queue_work_fn(struct work_struct *work)
++{
++	struct ssam_event_queue *queue;
++	struct ssam_event_item *item;
++	struct ssam_nf *nf;
++	struct device *dev;
++	int i;
++
++	queue = container_of(work, struct ssam_event_queue, work);
++	nf = &queue->cplt->event.notif;
++	dev = queue->cplt->dev;
++
++	// limit number of processed events to avoid livelocking
++	for (i = 0; i < 10; i++) {
++		item = ssam_event_queue_pop(queue);
++		if (item == NULL)
++			return;
++
++		ssam_nf_call(nf, dev, item->rqid, &item->event);
++		ssam_event_item_free(item);
++	}
++
++	if (!ssam_event_queue_is_empty(queue))
++		ssam_cplt_submit(queue->cplt, &queue->work);
++}
++
++/**
++ * ssam_event_queue_init() - Initialize an event queue.
++ * @cplt: The completion system on which the queue resides.
++ * @evq:  The event queue to initialize.
++ */
++static void ssam_event_queue_init(struct ssam_cplt *cplt,
++				  struct ssam_event_queue *evq)
++{
++	evq->cplt = cplt;
++	spin_lock_init(&evq->lock);
++	INIT_LIST_HEAD(&evq->head);
++	INIT_WORK(&evq->work, ssam_event_queue_work_fn);
++}
++
++/**
++ * ssam_cplt_init() - Initialize completion system.
++ * @cplt: The completion system to initialize.
++ * @dev:  The device used for logging.
++ */
++static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
++{
++	struct ssam_event_target *target;
++	int status, c, i;
++
++	cplt->dev = dev;
++
++	cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
++	if (!cplt->wq)
++		return -ENOMEM;
++
++	for (c = 0; c < ARRAY_SIZE(cplt->event.target); c++) {
++		target = &cplt->event.target[c];
++
++		for (i = 0; i < ARRAY_SIZE(target->queue); i++)
++			ssam_event_queue_init(cplt, &target->queue[i]);
++	}
++
++	status = ssam_nf_init(&cplt->event.notif);
++	if (status)
++		destroy_workqueue(cplt->wq);
++
++	return status;
++}
++
++/**
++ * ssam_cplt_destroy() - Deinitialize the completion system.
++ * @cplt: The completion system to deinitialize.
++ *
++ * Deinitialize the given completion system and ensure that all pending, i.e.
++ * yet-to-be-completed, event items and requests have been handled.
++ */
++static void ssam_cplt_destroy(struct ssam_cplt *cplt)
++{
++	/*
++	 * Note: destroy_workqueue ensures that all currently queued work will
++	 * be fully completed and the workqueue drained. This means that this
++	 * call will inherently also free any queued ssam_event_items, thus we
++	 * don't have to take care of that here explicitly.
++	 */
++	destroy_workqueue(cplt->wq);
++	ssam_nf_destroy(&cplt->event.notif);
++}
++
++
++/* -- Main SSAM device structures. ------------------------------------------ */
++
++/**
++ * ssam_controller_device() - Get the &struct device associated with this
++ * controller.
++ * @c: The controller for which to get the device.
++ *
++ * Return: Returns the &struct device associated with this controller,
++ * providing its lower-level transport.
++ */
++struct device *ssam_controller_device(struct ssam_controller *c)
++{
++	return ssh_rtl_get_device(&c->rtl);
++}
++EXPORT_SYMBOL_GPL(ssam_controller_device);
++
++static void __ssam_controller_release(struct kref *kref)
++{
++	struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
++
++	ssam_controller_destroy(ctrl);
++	kfree(ctrl);
++}
++
++/**
++ * ssam_controller_get() - Increment reference count of controller.
++ * @c: The controller.
++ *
++ * Return: Returns the controller provided as input.
++ */
++struct ssam_controller *ssam_controller_get(struct ssam_controller *c)
++{
++	if (c)
++		kref_get(&c->kref);
++	return c;
++}
++EXPORT_SYMBOL_GPL(ssam_controller_get);
++
++/**
++ * ssam_controller_put() - Decrement reference count of controller.
++ * @c: The controller.
++ */
++void ssam_controller_put(struct ssam_controller *c)
++{
++	if (c)
++		kref_put(&c->kref, __ssam_controller_release);
++}
++EXPORT_SYMBOL_GPL(ssam_controller_put);
++
++
++/**
++ * ssam_controller_statelock() - Lock the controller against state transitions.
++ * @c: The controller to lock.
++ *
++ * Lock the controller against state transitions. Holding this lock guarantees
++ * that the controller will not transition between states, i.e. if the
++ * controller is in state "started", when this lock has been acquired, it will
++ * remain in this state at least until the lock has been released.
++ *
++ * Multiple clients may concurrently hold this lock. In other words: The
++ * ``statelock`` functions represent the read-lock part of a r/w-semaphore.
++ * Actions causing state transitions of the controller must be executed while
++ * holding the write-part of this r/w-semaphore (see ssam_controller_lock()
++ * and ssam_controller_unlock() for that).
++ *
++ * See ssam_controller_stateunlock() for the corresponding unlock function.
++ */
++void ssam_controller_statelock(struct ssam_controller *c)
++{
++	down_read(&c->lock);
++}
++EXPORT_SYMBOL_GPL(ssam_controller_statelock);
++
++/**
++ * ssam_controller_stateunlock() - Unlock controller state transitions.
++ * @c: The controller to unlock.
++ *
++ * See ssam_controller_statelock() for the corresponding lock function.
++ */
++void ssam_controller_stateunlock(struct ssam_controller *c)
++{
++	up_read(&c->lock);
++}
++EXPORT_SYMBOL_GPL(ssam_controller_stateunlock);
++
++/**
++ * ssam_controller_lock() - Acquire the main controller lock.
++ * @c: The controller to lock.
++ *
++ * This lock must be held for any state transitions, including transition to
++ * suspend/resumed states and during shutdown. See ssam_controller_statelock()
++ * for more details on controller locking.
++ *
++ * See ssam_controller_unlock() for the corresponding unlock function.
++ */
++void ssam_controller_lock(struct ssam_controller *c)
++{
++	down_write(&c->lock);
++}
++
++/*
++ * ssam_controller_unlock() - Release the main controller lock.
++ * @c: The controller to unlock.
++ *
++ * See ssam_controller_lock() for the corresponding lock function.
++ */
++void ssam_controller_unlock(struct ssam_controller *c)
++{
++	up_write(&c->lock);
++}
++
++
++static void ssam_handle_event(struct ssh_rtl *rtl,
++			      const struct ssh_command *cmd,
++			      const struct ssam_span *data)
++{
++	struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
++	struct ssam_event_item *item;
++
++	item = ssam_event_item_alloc(data->len, GFP_KERNEL);
++	if (!item)
++		return;
++
++	item->rqid = get_unaligned_le16(&cmd->rqid);
++	item->event.target_category = cmd->tc;
++	item->event.target_id = cmd->tid_in;
++	item->event.command_id = cmd->cid;
++	item->event.instance_id = cmd->iid;
++	memcpy(&item->event.data[0], data->ptr, data->len);
++
++	WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item));
++}
++
++static const struct ssh_rtl_ops ssam_rtl_ops = {
++	.handle_event = ssam_handle_event,
++};
++
++
++static bool ssam_notifier_empty(struct ssam_controller *ctrl);
++static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
++
++
++#define SSAM_SSH_DSM_REVISION	0
++static const guid_t SSAM_SSH_DSM_GUID = GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
++		0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
++
++enum ssh_dsm_fn {
++	SSH_DSM_FN_SSH_POWER_PROFILE             = 0x05,
++	SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT  = 0x06,
++	SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT = 0x07,
++	SSH_DSM_FN_D3_CLOSES_HANDLE              = 0x08,
++	SSH_DSM_FN_SSH_BUFFER_SIZE               = 0x09,
++};
++
++static int ssam_dsm_get_functions(acpi_handle handle, u64 *funcs)
++{
++	union acpi_object *obj;
++	u64 mask = 0;
++	int i;
++
++	*funcs = 0;
++
++	if (!acpi_has_method(handle, "_DSM"))
++		return 0;
++
++	obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
++				      SSAM_SSH_DSM_REVISION, 0, NULL,
++				      ACPI_TYPE_BUFFER);
++	if (!obj)
++		return -EFAULT;
++
++	for (i = 0; i < obj->buffer.length && i < 8; i++)
++		mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
++
++	if (mask & 0x01)
++		*funcs = mask;
++
++	ACPI_FREE(obj);
++	return 0;
++}
++
++static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
++{
++	union acpi_object *obj;
++	u64 val;
++
++	if (!(funcs & BIT(func)))
++		return 0;
++
++	obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
++				      SSAM_SSH_DSM_REVISION, func, NULL,
++				      ACPI_TYPE_INTEGER);
++	if (!obj)
++		return -EFAULT;
++
++	val = obj->integer.value;
++	ACPI_FREE(obj);
++
++	if (val > U32_MAX)
++		return -ERANGE;
++
++	*ret = val;
++	return 0;
++}
++
++/**
++ * ssam_controller_caps_load_from_acpi() - Load controller capabilities from
++ * ACPI _DSM.
++ * @handle: The handle of the ACPI controller/SSH device.
++ * @caps:   Where to store the capabilities in.
++ *
++ * Initializes the given controller capabilities with default values, then
++ * checks and, if the respective _DSM functions are available, loads the
++ * actual capabilities from the _DSM.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++static int ssam_controller_caps_load_from_acpi(
++		acpi_handle handle, struct ssam_controller_caps *caps)
++{
++	u32 d3_closes_handle = false;
++	u64 funcs;
++	int status;
++
++	// set defaults
++	caps->ssh_power_profile = (u32)-1;
++	caps->screen_on_sleep_idle_timeout = (u32)-1;
++	caps->screen_off_sleep_idle_timeout = (u32)-1;
++	caps->d3_closes_handle = false;
++	caps->ssh_buffer_size = (u32)-1;
++
++	status = ssam_dsm_get_functions(handle, &funcs);
++	if (status)
++		return status;
++
++	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_POWER_PROFILE,
++				   &caps->ssh_power_profile);
++	if (status)
++		return status;
++
++	status = ssam_dsm_load_u32(handle, funcs,
++				   SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT,
++				   &caps->screen_on_sleep_idle_timeout);
++	if (status)
++		return status;
++
++	status = ssam_dsm_load_u32(handle, funcs,
++				   SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT,
++				   &caps->screen_off_sleep_idle_timeout);
++	if (status)
++		return status;
++
++	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_D3_CLOSES_HANDLE,
++				   &d3_closes_handle);
++	if (status)
++		return status;
++
++	caps->d3_closes_handle = !!d3_closes_handle;
++
++	status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_BUFFER_SIZE,
++				   &caps->ssh_buffer_size);
++	if (status)
++		return status;
++
++	return 0;
++}
++
++/**
++ * ssam_controller_init() - Initialize SSAM controller.
++ * @ctrl:   The controller to initialize.
++ * @serdev: The serial device representing the underlying data transport.
++ *
++ * Initializes the given controller. Does neither start receiver nor
++ * transmitter threads. After this call, the controller has to be hooked up to
++ * the serdev core separately via &struct serdev_device_ops, relaying calls to
++ * ssam_controller_receive_buf() and ssam_controller_write_wakeup(). Once the
++ * controller has been hooked up, transmitter and receiver threads may be
++ * started via ssam_controller_start(). These setup steps need to be completed
++ * before the controller can be used for requests.
++ */
++int ssam_controller_init(struct ssam_controller *ctrl,
++			 struct serdev_device *serdev)
++{
++	acpi_handle handle = ACPI_HANDLE(&serdev->dev);
++	int status;
++
++	init_rwsem(&ctrl->lock);
++	kref_init(&ctrl->kref);
++
++	status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps);
++	if (status)
++		return status;
++
++	dev_dbg(&serdev->dev,
++		"device capabilities:\n"
++		"  ssh_power_profile:             %u\n"
++		"  ssh_buffer_size:               %u\n"
++		"  screen_on_sleep_idle_timeout:  %u\n"
++		"  screen_off_sleep_idle_timeout: %u\n"
++		"  d3_closes_handle:              %u\n",
++		ctrl->caps.ssh_power_profile,
++		ctrl->caps.ssh_buffer_size,
++		ctrl->caps.screen_on_sleep_idle_timeout,
++		ctrl->caps.screen_off_sleep_idle_timeout,
++		ctrl->caps.d3_closes_handle);
++
++	ssh_seq_reset(&ctrl->counter.seq);
++	ssh_rqid_reset(&ctrl->counter.rqid);
++
++	// initialize event/request completion system
++	status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
++	if (status)
++		return status;
++
++	// initialize request and packet transport layers
++	status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
++	if (status) {
++		ssam_cplt_destroy(&ctrl->cplt);
++		return status;
++	}
++
++	/*
++	 * Set state via write_once even though we expect to be in an
++	 * exclusive context, due to smoke-testing in
++	 * ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_INITIALIZED);
++	return 0;
++}
++
++/**
++ * ssam_controller_start() - Start the receiver and transmitter threads of the
++ * controller.
++ * @ctrl: The controller.
++ *
++ * Note: When this function is called, the controller should be properly
++ * hooked up to the serdev core via &struct serdev_device_ops. Please refer to
++ * ssam_controller_init() for more details on controller initialization.
++ *
++ * This function must be called from an exclusive context with regards to the
++ * state, if necessary, by locking the controller via ssam_controller_lock().
++ */
++int ssam_controller_start(struct ssam_controller *ctrl)
++{
++	int status;
++
++	if (ctrl->state != SSAM_CONTROLLER_INITIALIZED)
++		return -EINVAL;
++
++	status = ssh_rtl_start(&ctrl->rtl);
++	if (status)
++		return status;
++
++	/*
++	 * Set state via write_once even though we expect to be locked/in an
++	 * exclusive context, due to smoke-testing in
++	 * ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
++	return 0;
++}
++
++/**
++ * ssam_controller_shutdown() - Shut down the controller.
++ * @ctrl: The controller.
++ *
++ * Shuts down the controller by flushing all pending requests and stopping the
++ * transmitter and receiver threads. All requests submitted after this call
++ * will fail with %-ESHUTDOWN. While it is discouraged to do so, this function
++ * is safe to use in parallel with ongoing request submission.
++ *
++ * In the course of this shutdown procedure, all currently registered
++ * notifiers will be unregistered. It is, however, strongly recommended to not
++ * rely on this behavior, and instead the party registering the notifier should
++ * unregister it before the controller gets shut down, e.g. via the SSAM bus
++ * which guarantees client devices to be removed before a shutdown.
++ *
++ * Note that events may still be pending after this call, but due to the
++ * notifiers being unregistered, they will be dropped when the controller is
++ * subsequently being destroyed via ssam_controller_destroy().
++ *
++ * This function must be called from an exclusive context with regards to the
++ * state, if necessary, by locking the controller via ssam_controller_lock().
++ */
++void ssam_controller_shutdown(struct ssam_controller *ctrl)
++{
++	enum ssam_controller_state s = ctrl->state;
++	int status;
++
++	if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
++		return;
++
++	// try to flush pending events and requests while everything still works
++	status = ssh_rtl_flush(&ctrl->rtl, msecs_to_jiffies(5000));
++	if (status) {
++		ssam_err(ctrl, "failed to flush request transport layer: %d\n",
++			 status);
++	}
++
++	// try to flush out all currently completing requests and events
++	ssam_cplt_flush(&ctrl->cplt);
++
++	/*
++	 * We expect all notifiers to have been removed by the respective client
++	 * driver that set them up at this point. If this warning occurs, some
++	 * client driver has not done that...
++	 */
++	WARN_ON(!ssam_notifier_empty(ctrl));
++
++	/*
++	 * Nevertheless, we should still take care of drivers that don't behave
++	 * well. Thus disable all enabled events, unregister all notifiers.
++	 */
++	ssam_notifier_unregister_all(ctrl);
++
++	// cancel rem. requests, ensure no new ones can be queued, stop threads
++	ssh_rtl_shutdown(&ctrl->rtl);
++
++	/*
++	 * Set state via write_once even though we expect to be locked/in an
++	 * exclusive context, due to smoke-testing in
++	 * ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STOPPED);
++	ctrl->rtl.ptl.serdev = NULL;
++}
++
++/**
++ * ssam_controller_destroy() - Destroy the controller and free its resources.
++ * @ctrl: The controller.
++ *
++ * Ensures that all resources associated with the controller get freed. This
++ * function should only be called after the controller has been stopped via
++ * ssam_controller_shutdown(). In general, this function should not be called
++ * directly. The only valid place to call this function directly is during
++ * initialization, before the controller has been fully initialized and passed
++ * to other processes. This function is called automatically when the
++ * reference count of the controller reaches zero.
++ *
++ * Must be called from an exclusive context with regards to the controller
++ * state.
++ */
++void ssam_controller_destroy(struct ssam_controller *ctrl)
++{
++	if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED)
++		return;
++
++	WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
++
++	/*
++	 * Note: New events could still have been received after the previous
++	 * flush in ssam_controller_shutdown, before the request transport layer
++	 * has been shut down. At this point, after the shutdown, we can be sure
++	 * that no new events will be queued. The call to ssam_cplt_destroy will
++	 * ensure that those remaining are being completed and freed.
++	 */
++
++	// actually free resources
++	ssam_cplt_destroy(&ctrl->cplt);
++	ssh_rtl_destroy(&ctrl->rtl);
++
++	/*
++	 * Set state via write_once even though we expect to be locked/in an
++	 * exclusive context, due to smoke-testing in
++	 * ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
++}
++
++/**
++ * ssam_controller_suspend() - Suspend the controller.
++ * @ctrl: The controller to suspend.
++ *
++ * Marks the controller as suspended. Note that display-off and D0-exit
++ * notifications have to be sent manually before transitioning the controller
++ * into the suspended state via this function.
++ *
++ * See ssam_controller_resume() for the corresponding resume function.
++ *
++ * Return: Returns %-EINVAL if the controller is currently not in the
++ * "started" state.
++ */
++int ssam_controller_suspend(struct ssam_controller *ctrl)
++{
++	ssam_controller_lock(ctrl);
++
++	if (ctrl->state != SSAM_CONTROLLER_STARTED) {
++		ssam_controller_unlock(ctrl);
++		return -EINVAL;
++	}
++
++	ssam_dbg(ctrl, "pm: suspending controller\n");
++
++	/*
++	 * Set state via write_once even though we're locked, due to
++	 * smoke-testing in ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_SUSPENDED);
++
++	ssam_controller_unlock(ctrl);
++	return 0;
++}
++
++/**
++ * ssam_controller_resume() - Resume the controller from suspend.
++ * @ctrl: The controller to resume.
++ *
++ * Resume the controller from the suspended state it was put into via
++ * ssam_controller_suspend(). This function does not issue display-on and
++ * D0-entry notifications. If required, those have to be sent manually after
++ * this call.
++ *
++ * Return: Returns %-EINVAL if the controller is currently not suspended.
++ */
++int ssam_controller_resume(struct ssam_controller *ctrl)
++{
++	ssam_controller_lock(ctrl);
++
++	if (ctrl->state != SSAM_CONTROLLER_SUSPENDED) {
++		ssam_controller_unlock(ctrl);
++		return -EINVAL;
++	}
++
++	ssam_dbg(ctrl, "pm: resuming controller\n");
++
++	/*
++	 * Set state via write_once even though we're locked, due to
++	 * smoke-testing in ssam_request_sync_submit().
++	 */
++	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
++
++	ssam_controller_unlock(ctrl);
++	return 0;
++}
++
++
++/* -- Top-level request interface ------------------------------------------- */
++
++/**
++ * ssam_request_write_data() - Construct and write SAM request message to
++ * buffer.
++ * @buf:  The buffer to write the data to.
++ * @ctrl: The controller via which the request will be sent.
++ * @spec: The request data and specification.
++ *
++ * Constructs a SAM/SSH request message and writes it to the provided buffer.
++ * The request and transport counters, specifically RQID and SEQ, will be set
++ * in this call. These counters are obtained from the controller. It is thus
++ * only valid to send the resulting message via the controller specified here.
++ *
++ * For calculation of the required buffer size, refer to the
++ * SSH_COMMAND_MESSAGE_LENGTH() macro.
++ *
++ * Return: Returns the number of bytes used in the buffer on success. Returns
++ * %-EINVAL if the payload length provided in the request specification is too
++ * large (larger than %SSH_COMMAND_MAX_PAYLOAD_SIZE) or if the provided buffer
++ * is too small.
++ */
++ssize_t ssam_request_write_data(struct ssam_span *buf,
++				struct ssam_controller *ctrl,
++				const struct ssam_request *spec)
++{
++	struct msgbuf msgb;
++	u16 rqid;
++	u8 seq;
++
++	if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
++		return -EINVAL;
++
++	if (SSH_COMMAND_MESSAGE_LENGTH(spec->length) > buf->len)
++		return -EINVAL;
++
++	msgb_init(&msgb, buf->ptr, buf->len);
++	seq = ssh_seq_next(&ctrl->counter.seq);
++	rqid = ssh_rqid_next(&ctrl->counter.rqid);
++	msgb_push_cmd(&msgb, seq, rqid, spec);
++
++	return msgb_bytes_used(&msgb);
++}
++EXPORT_SYMBOL_GPL(ssam_request_write_data);
++
++
++static void ssam_request_sync_complete(struct ssh_request *rqst,
++				       const struct ssh_command *cmd,
++				       const struct ssam_span *data, int status)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++	struct ssam_request_sync *r;
++
++	r = container_of(rqst, struct ssam_request_sync, base);
++	r->status = status;
++
++	if (r->resp)
++		r->resp->length = 0;
++
++	if (status) {
++		rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
++		return;
++	}
++
++	if (!data)	// handle requests without a response
++		return;
++
++	if (!r->resp || !r->resp->pointer) {
++		if (data->len)
++			rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n");
++		return;
++	}
++
++	if (data->len > r->resp->capacity) {
++		rtl_err(rtl, "rsp: response buffer too small, capacity: %zu bytes,"
++			" got: %zu bytes\n", r->resp->capacity, data->len);
++		r->status = -ENOSPC;
++		return;
++	}
++
++	r->resp->length = data->len;
++	memcpy(r->resp->pointer, data->ptr, data->len);
++}
++
++static void ssam_request_sync_release(struct ssh_request *rqst)
++{
++	complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
++}
++
++static const struct ssh_request_ops ssam_request_sync_ops = {
++	.release = ssam_request_sync_release,
++	.complete = ssam_request_sync_complete,
++};
++
++
++/**
++ * ssam_request_sync_alloc() - Allocate a synchronous request.
++ * @payload_len: The length of the request payload.
++ * @flags:       Flags used for allocation.
++ * @rqst:        Where to store the pointer to the allocated request.
++ * @buffer:      Where to store the buffer descriptor for the message buffer of
++ *               the request.
++ *
++ * Allocates a synchronous request with corresponding message buffer. The
++ * request still needs to be initialized via ssam_request_sync_init() before
++ * it can be submitted, and the message buffer data must still be set to the
++ * returned buffer via ssam_request_sync_set_data() after it has been filled,
++ * if need be with adjusted message length.
++ *
++ * After use, the request and its corresponding message buffer should be freed
++ * via ssam_request_sync_free(). The buffer must not be freed separately.
++ *
++ * Return: Returns zero on success, %-ENOMEM if the request could not be
++ * allocated.
++ */
++int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
++			    struct ssam_request_sync **rqst,
++			    struct ssam_span *buffer)
++{
++	size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
++
++	*rqst = kzalloc(sizeof(**rqst) + msglen, flags);
++	if (!*rqst)
++		return -ENOMEM;
++
++	buffer->ptr = (u8 *)(*rqst + 1);
++	buffer->len = msglen;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
++
++/**
++ * ssam_request_sync_free() - Free a synchronous request.
++ * @rqst: The request to free.
++ *
++ * Free a synchronous request and its corresponding buffer allocated with
++ * ssam_request_sync_alloc(). Do not use for requests allocated on the stack
++ * or via any other function.
++ *
++ * Warning: The caller must ensure that the request is not in use any more.
++ * I.e. the caller must ensure that it has the only reference to the request
++ * and the request is not currently pending. This means that the caller has
++ * either never submitted the request, request submission has failed, or the
++ * caller has waited until the submitted request has been completed via
++ * ssam_request_sync_wait().
++ */
++void ssam_request_sync_free(struct ssam_request_sync *rqst)
++{
++	kfree(rqst);
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_free);
++
++/**
++ * ssam_request_sync_init() - Initialize a synchronous request struct.
++ * @rqst:  The request to initialize.
++ * @flags: The request flags.
++ *
++ * Initializes the given request struct. Does not initialize the request
++ * message data. This has to be done explicitly after this call via
++ * ssam_request_sync_set_data() and the actual message data has to be written
++ * via ssam_request_write_data().
++ */
++void ssam_request_sync_init(struct ssam_request_sync *rqst,
++			    enum ssam_request_flags flags)
++{
++	ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
++	init_completion(&rqst->comp);
++	rqst->resp = NULL;
++	rqst->status = 0;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_init);
++
++/**
++ * ssam_request_sync_submit() - Submit a synchronous request.
++ * @ctrl: The controller with which to submit the request.
++ * @rqst: The request to submit.
++ *
++ * Submit a synchronous request. The request has to be initialized and
++ * properly set up, including response buffer (may be %NULL if no response is
++ * expected) and command message data. This function does not wait for the
++ * request to be completed.
++ *
++ * If this function succeeds, ssam_request_sync_wait() must be used to ensure
++ * that the request has been completed before the response data can be
++ * accessed and/or the request can be freed. On failure, the request may
++ * immediately be freed.
++ *
++ * This function may only be used if the controller is active, i.e. has been
++ * initialized and not suspended.
++ */
++int ssam_request_sync_submit(struct ssam_controller *ctrl,
++			     struct ssam_request_sync *rqst)
++{
++	int status;
++
++	/*
++	 * This is only a superficial check. In general, the caller needs to
++	 * ensure that the controller is initialized and is not (and does not
++	 * get) suspended during use, i.e. until the request has been completed
++	 * (if _absolutely_ necessary, by use of ssam_controller_statelock/
++	 * ssam_controller_stateunlock, but something like ssam_client_link
++	 * should be preferred as this needs to last until the request has been
++	 * completed).
++	 *
++	 * Note that it is actually safe to use this function while the
++	 * controller is in the process of being shut down (as ssh_rtl_submit
++	 * is safe with regards to this), but it is generally discouraged to do
++	 * so.
++	 */
++	if (WARN_ON(READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED)) {
++		ssh_request_put(&rqst->base);
++		return -ENXIO;
++	}
++
++	status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
++	ssh_request_put(&rqst->base);
++
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
++
++/**
++ * ssam_request_sync() - Execute a synchronous request.
++ * @ctrl: The controller via which the request will be submitted.
++ * @spec: The request specification and payload.
++ * @rsp:  The response buffer.
++ *
++ * Allocates a synchronous request with its message data buffer on the heap
++ * via ssam_request_sync_alloc(), fully initializes it via the provided request
++ * specification, submits it, and finally waits for its completion before
++ * freeing it and returning its status.
++ *
++ * Return: Returns the status of the request or any failure during setup.
++ */
++int ssam_request_sync(struct ssam_controller *ctrl,
++		      const struct ssam_request *spec,
++		      struct ssam_response *rsp)
++{
++	struct ssam_request_sync *rqst;
++	struct ssam_span buf;
++	ssize_t len;
++	int status;
++
++	status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
++	if (status)
++		return status;
++
++	ssam_request_sync_init(rqst, spec->flags);
++	ssam_request_sync_set_resp(rqst, rsp);
++
++	len = ssam_request_write_data(&buf, ctrl, spec);
++	if (len < 0) {
++		ssam_request_sync_free(rqst);
++		return len;
++	}
++
++	ssam_request_sync_set_data(rqst, buf.ptr, len);
++
++	status = ssam_request_sync_submit(ctrl, rqst);
++	if (!status)
++		status = ssam_request_sync_wait(rqst);
++
++	ssam_request_sync_free(rqst);
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync);
++
++/**
++ * ssam_request_sync_with_buffer() - Execute a synchronous request with the
++ * provided buffer as backend for the message buffer.
++ * @ctrl: The controller via which the request will be submitted.
++ * @spec: The request specification and payload.
++ * @rsp:  The response buffer.
++ * @buf:  The buffer for the request message data.
++ *
++ * Allocates a synchronous request struct on the stack, fully initializes it
++ * using the provided buffer as message data buffer, submits it, and then
++ * waits for its completion before returning its status. The
++ * SSH_COMMAND_MESSAGE_LENGTH() macro can be used to compute the required
++ * message buffer size.
++ *
++ * This function does essentially the same as ssam_request_sync(), but instead
++ * of dynamically allocating the request and message data buffer, it uses the
++ * provided message data buffer and stores the (small) request struct on the
++ * heap.
++ *
++ * Return: Returns the status of the request or any failure during setup.
++ */
++int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
++				  const struct ssam_request *spec,
++				  struct ssam_response *rsp,
++				  struct ssam_span *buf)
++{
++	struct ssam_request_sync rqst;
++	ssize_t len;
++	int status;
++
++	ssam_request_sync_init(&rqst, spec->flags);
++	ssam_request_sync_set_resp(&rqst, rsp);
++
++	len = ssam_request_write_data(buf, ctrl, spec);
++	if (len < 0)
++		return len;
++
++	ssam_request_sync_set_data(&rqst, buf->ptr, len);
++
++	status = ssam_request_sync_submit(ctrl, &rqst);
++	if (!status)
++		status = ssam_request_sync_wait(&rqst);
++
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
++
++
++/* -- Internal SAM requests. ------------------------------------------------ */
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x13,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x15,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x16,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x33,
++	.instance_id     = 0x00,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
++	.target_category = SSAM_SSH_TC_SAM,
++	.target_id       = 0x01,
++	.command_id      = 0x34,
++	.instance_id     = 0x00,
++});
++
++/**
++ * struct ssh_notification_params - Command payload to enable/disable SSH
++ * notifications.
++ * @target_category: The target category for which notifications should be
++ *                   enabled/disabled.
++ * @flags:           Flags determining how notifications are being sent.
++ * @request_id:      The request ID that is used to send these notifications.
++ * @instance_id:     The specific instance in the given target category for
++ *                   which notifications should be enabled.
++ */
++struct ssh_notification_params {
++	u8 target_category;
++	u8 flags;
++	__le16 request_id;
++	u8 instance_id;
++} __packed;
++
++static_assert(sizeof(struct ssh_notification_params) == 5);
++
++/**
++ * ssam_ssh_event_enable() - Enable SSH event.
++ * @ctrl:  The controller for which to enable the event.
++ * @reg:   The event registry describing what request to use for enabling and
++ *         disabling the event.
++ * @id:    The event identifier.
++ * @flags: The event flags.
++ *
++ * This is a wrapper for the raw SAM request to enable an event, thus it does
++ * not handle reference counting for enable/disable of events. If an event
++ * has already been enabled, the EC will ignore this request.
++ *
++ * Return: Returns the status of the executed SAM request (zero on success and
++ * negative on direct failure) or %-EPROTO if the request response indicates a
++ * failure.
++ */
++static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
++				 struct ssam_event_registry reg,
++				 struct ssam_event_id id, u8 flags)
++{
++	struct ssh_notification_params params;
++	struct ssam_request rqst;
++	struct ssam_response result;
++	int status;
++
++	u16 rqid = ssh_tc_to_rqid(id.target_category);
++	u8 buf[1] = { 0x00 };
++
++	// only allow RQIDs that lie within event spectrum
++	if (!ssh_rqid_is_event(rqid))
++		return -EINVAL;
++
++	params.target_category = id.target_category;
++	params.instance_id = id.instance;
++	params.flags = flags;
++	put_unaligned_le16(rqid, &params.request_id);
++
++	rqst.target_category = reg.target_category;
++	rqst.target_id = reg.target_id;
++	rqst.command_id = reg.cid_enable;
++	rqst.instance_id = 0x00;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(params);
++	rqst.payload = (u8 *)&params;
++
++	result.capacity = ARRAY_SIZE(buf);
++	result.length = 0;
++	result.pointer = buf;
++
++	status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params));
++	if (status) {
++		ssam_err(ctrl, "failed to enable event source (tc: 0x%02x, "
++			 "iid: 0x%02x, reg: 0x%02x)\n", id.target_category,
++			 id.instance, reg.target_category);
++	}
++
++	if (buf[0] != 0x00) {
++		ssam_err(ctrl, "unexpected result while enabling event source: "
++			 "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
++			 buf[0], id.target_category, id.instance,
++			 reg.target_category);
++		return -EPROTO;
++	}
++
++	return status;
++
++}
++
++/**
++ * ssam_ssh_event_disable() - Disable SSH event.
++ * @ctrl:  The controller for which to disable the event.
++ * @reg:   The event registry describing what request to use for enabling and
++ *         disabling the event (must be same as used when enabling the event).
++ * @id:    The event identifier.
++ * @flags: The event flags (likely ignored for disabling of events).
++ *
++ * This is a wrapper for the raw SAM request to disable an event, thus it does
++ * not handle reference counting for enable/disable of events. If an event has
++ * already been disabled, the EC will ignore this request.
++ *
++ * Return: Returns the status of the executed SAM request (zero on success and
++ * negative on direct failure) or %-EPROTO if the request response indicates a
++ * failure.
++ */
++static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
++				  struct ssam_event_registry reg,
++				  struct ssam_event_id id, u8 flags)
++{
++	struct ssh_notification_params params;
++	struct ssam_request rqst;
++	struct ssam_response result;
++	int status;
++
++	u16 rqid = ssh_tc_to_rqid(id.target_category);
++	u8 buf[1] = { 0x00 };
++
++	// only allow RQIDs that lie within event spectrum
++	if (!ssh_rqid_is_event(rqid))
++		return -EINVAL;
++
++	params.target_category = id.target_category;
++	params.instance_id = id.instance;
++	params.flags = flags;
++	put_unaligned_le16(rqid, &params.request_id);
++
++	rqst.target_category = reg.target_category;
++	rqst.target_id = reg.target_id;
++	rqst.command_id = reg.cid_disable;
++	rqst.instance_id = 0x00;
++	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
++	rqst.length = sizeof(params);
++	rqst.payload = (u8 *)&params;
++
++	result.capacity = ARRAY_SIZE(buf);
++	result.length = 0;
++	result.pointer = buf;
++
++	status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params));
++	if (status) {
++		ssam_err(ctrl, "failed to disable event source (tc: 0x%02x, "
++			 "iid: 0x%02x, reg: 0x%02x)\n", id.target_category,
++			 id.instance, reg.target_category);
++	}
++
++	if (buf[0] != 0x00) {
++		ssam_err(ctrl, "unexpected result while disabling event source: "
++			 "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
++			 buf[0], id.target_category, id.instance,
++			 reg.target_category);
++		return -EPROTO;
++	}
++
++	return status;
++}
++
++
++/* -- Wrappers for internal SAM requests. ----------------------------------- */
++
++/**
++ * ssam_get_firmware_version() - Get the SAM/EC firmware version.
++ * @ctrl:    The controller.
++ * @version: Where to store the version number.
++ *
++ * Return: Returns zero on success or the status of the executed SAM request
++ * if that request failed.
++ */
++int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version)
++{
++	__le32 __version;
++	int status;
++
++	status = ssam_ssh_get_firmware_version(ctrl, &__version);
++	if (status)
++		return status;
++
++	*version = le32_to_cpu(__version);
++	return 0;
++}
++
++/**
++ * ssam_ctrl_notif_display_off() - Notify EC that the display has been turned
++ * off.
++ * @ctrl: The controller.
++ *
++ * Notify the EC that the display has been turned off and the driver may enter
++ * a lower-power state. This will prevent events from being sent directly.
++ * Rather, the EC signals an event by pulling the wakeup GPIO high for as long
++ * as there are pending events. The events then need to be manually released,
++ * one by one, via the GPIO callback request. All pending events accumulated
++ * during this state can also be released by issuing the display-on
++ * notification, e.g. via ssam_ctrl_notif_display_on(), which will also reset
++ * the GPIO.
++ *
++ * On some devices, specifically ones with an integrated keyboard, the keyboard
++ * backlight will be turned off by this call.
++ *
++ * This function will only send the display-off notification command if
++ * display notifications are supported by the EC. Currently all known devices
++ * support these notifications.
++ *
++ * Use ssam_ctrl_notif_display_on() to reverse the effects of this function.
++ *
++ * Return: Returns zero on success or if no request has been executed, the
++ * status of the executed SAM request if that request failed, or %-EPROTO if
++ * an unexpected response has been received.
++ */
++int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
++{
++	int status;
++	u8 response;
++
++	ssam_dbg(ctrl, "pm: notifying display off\n");
++
++	status = ssam_ssh_notif_display_off(ctrl, &response);
++	if (status)
++		return status;
++
++	if (response != 0) {
++		ssam_err(ctrl, "unexpected response from display-off notification: 0x%02x\n",
++			 response);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++/**
++ * ssam_ctrl_notif_display_on() - Notify EC that the display has been turned on.
++ * @ctrl: The controller.
++ *
++ * Notify the EC that the display has been turned back on and the driver has
++ * exited its lower-power state. This notification is the counterpart to the
++ * display-off notification sent via ssam_ctrl_notif_display_off() and will
++ * reverse its effects, including resetting events to their default behavior.
++ *
++ * This function will only send the display-on notification command if display
++ * notifications are supported by the EC. Currently all known devices support
++ * these notifications.
++ *
++ * See ssam_ctrl_notif_display_off() for more details.
++ *
++ * Return: Returns zero on success or if no request has been executed, the
++ * status of the executed SAM request if that request failed, or %-EPROTO if
++ * an unexpected response has been received.
++ */
++int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
++{
++	int status;
++	u8 response;
++
++	ssam_dbg(ctrl, "pm: notifying display on\n");
++
++	status = ssam_ssh_notif_display_on(ctrl, &response);
++	if (status)
++		return status;
++
++	if (response != 0) {
++		ssam_err(ctrl, "unexpected response from display-on notification: 0x%02x\n",
++			 response);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++/**
++ * ssam_ctrl_notif_d0_exit() - Notify EC that the driver/device exits the D0
++ * power state.
++ * @ctrl: The controller
++ *
++ * Notifies the EC that the driver prepares to exit the D0 power state in
++ * favor of a lower-power state. Exact effects of this function related to the
++ * EC are currently unknown.
++ *
++ * This function will only send the D0-exit notification command if D0-state
++ * notifications are supported by the EC. Only newer Surface generations
++ * support these notifications.
++ *
++ * Use ssam_ctrl_notif_d0_entry() to reverse the effects of this function.
++ *
++ * Return: Returns zero on success or if no request has been executed, the
++ * status of the executed SAM request if that request failed, or %-EPROTO if
++ * an unexpected response has been received.
++ */
++int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
++{
++	int status;
++	u8 response;
++
++	if (!ctrl->caps.d3_closes_handle)
++		return 0;
++
++	ssam_dbg(ctrl, "pm: notifying D0 exit\n");
++
++	status = ssam_ssh_notif_d0_exit(ctrl, &response);
++	if (status)
++		return status;
++
++	if (response != 0) {
++		ssam_err(ctrl, "unexpected response from D0-exit notification:"
++			 " 0x%02x\n", response);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++/**
++ * ssam_ctrl_notif_d0_entry() - Notify EC that the driver/device enters the D0
++ * power state.
++ * @ctrl: The controller
++ *
++ * Notifies the EC that the driver has exited a lower-power state and entered
++ * the D0 power state. Exact effects of this function related to the EC are
++ * currently unknown.
++ *
++ * This function will only send the D0-entry notification command if D0-state
++ * notifications are supported by the EC. Only newer Surface generations
++ * support these notifications.
++ *
++ * See ssam_ctrl_notif_d0_exit() for more details.
++ *
++ * Return: Returns zero on success or if no request has been executed, the
++ * status of the executed SAM request if that request failed, or %-EPROTO if
++ * an unexpected response has been received.
++ */
++int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
++{
++	int status;
++	u8 response;
++
++	if (!ctrl->caps.d3_closes_handle)
++		return 0;
++
++	ssam_dbg(ctrl, "pm: notifying D0 entry\n");
++
++	status = ssam_ssh_notif_d0_entry(ctrl, &response);
++	if (status)
++		return status;
++
++	if (response != 0) {
++		ssam_err(ctrl, "unexpected response from D0-entry notification:"
++			 " 0x%02x\n", response);
++		return -EPROTO;
++	}
++
++	return 0;
++}
++
++
++/* -- Top-level event registry interface. ----------------------------------- */
++
++/**
++ * ssam_notifier_register() - Register an event notifier.
++ * @ctrl: The controller to register the notifier on.
++ * @n:    The event notifier to register.
++ *
++ * Register an event notifier and increment the usage counter of the
++ * associated SAM event. If the event was previously not enabled, it will be
++ * enabled during this call.
++ *
++ * Return: Returns zero on success, %-ENOSPC if there have already been
++ * %INT_MAX notifiers for the event ID/type associated with the notifier block
++ * registered, %-ENOMEM if the corresponding event entry could not be
++ * allocated. If this is the first time that a notifier block is registered
++ * for the specific associated event, returns the status of the event-enable
++ * EC-command.
++ */
++int ssam_notifier_register(struct ssam_controller *ctrl,
++			   struct ssam_event_notifier *n)
++{
++	u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
++	struct ssam_nf_refcount_entry *entry;
++	struct ssam_nf_head *nf_head;
++	struct ssam_nf *nf;
++	int status;
++
++	if (!ssh_rqid_is_event(rqid))
++		return -EINVAL;
++
++	nf = &ctrl->cplt.event.notif;
++	nf_head = &nf->head[ssh_rqid_to_event(rqid)];
++
++	mutex_lock(&nf->lock);
++
++	entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
++	if (IS_ERR(entry)) {
++		mutex_unlock(&nf->lock);
++		return PTR_ERR(entry);
++	}
++
++	ssam_dbg(ctrl, "enabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x,"
++		 " rc: %d)\n", n->event.reg.target_category,
++		 n->event.id.target_category, n->event.id.instance,
++		 entry->refcount);
++
++	status = __ssam_nfblk_insert(nf_head, &n->base);
++	if (status) {
++		entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
++		if (entry->refcount == 0)
++			kfree(entry);
++
++		mutex_unlock(&nf->lock);
++		return status;
++	}
++
++	if (entry->refcount == 1) {
++		status = ssam_ssh_event_enable(ctrl, n->event.reg, n->event.id,
++					       n->event.flags);
++		if (status) {
++			__ssam_nfblk_remove(nf_head, &n->base);
++			kfree(ssam_nf_refcount_dec(nf, n->event.reg, n->event.id));
++			mutex_unlock(&nf->lock);
++			synchronize_srcu(&nf_head->srcu);
++			return status;
++		}
++
++		entry->flags = n->event.flags;
++
++	} else if (entry->flags != n->event.flags) {
++		ssam_warn(ctrl, "inconsistent flags when enabling event: got 0x%02x,"
++			  " expected 0x%02x (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x)",
++			  n->event.flags, entry->flags, n->event.reg.target_category,
++			  n->event.id.target_category, n->event.id.instance);
++	}
++
++	mutex_unlock(&nf->lock);
++	return 0;
++
++}
++EXPORT_SYMBOL_GPL(ssam_notifier_register);
++
++/**
++ * ssam_notifier_unregister() - Unregister an event notifier.
++ * @ctrl: The controller the notifier has been registered on.
++ * @n:    The event notifier to unregister.
++ *
++ * Unregister an event notifier and decrement the usage counter of the
++ * associated SAM event. If the usage counter reaches zero, the event will be
++ * disabled.
++ *
++ * Return: Returns zero on success, %-ENOENT if the given notifier block has
++ * not been registered on the controller. If the given notifier block was the
++ * last one associated with its specific event, returns the status of the
++ * event-disable EC-command.
++ */
++int ssam_notifier_unregister(struct ssam_controller *ctrl,
++			     struct ssam_event_notifier *n)
++{
++	u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
++	struct ssam_notifier_block **link;
++	struct ssam_nf_refcount_entry *entry;
++	struct ssam_nf_head *nf_head;
++	struct ssam_nf *nf;
++	int status = 0;
++
++	if (!ssh_rqid_is_event(rqid))
++		return -EINVAL;
++
++	nf = &ctrl->cplt.event.notif;
++	nf_head = &nf->head[ssh_rqid_to_event(rqid)];
++
++	mutex_lock(&nf->lock);
++
++	link = __ssam_nfblk_find_link(nf_head, &n->base);
++	if (!link) {
++		mutex_unlock(&nf->lock);
++		return -ENOENT;
++	}
++
++	entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
++	if (WARN_ON(!entry)) {
++		mutex_unlock(&nf->lock);
++		return -ENOENT;
++	}
++
++	ssam_dbg(ctrl, "disabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x,"
++		 " rc: %d)\n", n->event.reg.target_category,
++		 n->event.id.target_category, n->event.id.instance,
++		 entry->refcount);
++
++	if (entry->flags != n->event.flags) {
++		ssam_warn(ctrl, "inconsistent flags when enabling event: got 0x%02x,"
++			  " expected 0x%02x (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x)",
++			  n->event.flags, entry->flags, n->event.reg.target_category,
++			  n->event.id.target_category, n->event.id.instance);
++	}
++
++	if (entry->refcount == 0) {
++		status = ssam_ssh_event_disable(ctrl, n->event.reg, n->event.id,
++						n->event.flags);
++		kfree(entry);
++	}
++
++	__ssam_nfblk_erase(link);
++	mutex_unlock(&nf->lock);
++	synchronize_srcu(&nf_head->srcu);
++
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
++
++/**
++ * ssam_notifier_disable_registered() - Disable events for all registered
++ * notifiers.
++ * @ctrl: The controller for which to disable the notifiers/events.
++ *
++ * Disables events for all currently registered notifiers. In case of an error
++ * (EC command failing), all previously disabled events will be restored and
++ * the error code returned.
++ *
++ * This function is intended to disable all events prior to hibernation entry.
++ * See ssam_notifier_restore_registered() to restore/re-enable all events
++ * disabled with this function.
++ *
++ * Note that this function will not disable events for notifiers registered
++ * after calling this function. It should thus be made sure that no new
++ * notifiers are going to be added after this call and before the corresponding
++ * call to ssam_notifier_restore_registered().
++ *
++ * Return: Returns zero on success. In case of failure returns the error code
++ * returned by the failed EC command to disable an event.
++ */
++int ssam_notifier_disable_registered(struct ssam_controller *ctrl)
++{
++	struct ssam_nf *nf = &ctrl->cplt.event.notif;
++	struct rb_node *n;
++	int status;
++
++	mutex_lock(&nf->lock);
++	for (n = rb_first(&nf->refcount); n != NULL; n = rb_next(n)) {
++		struct ssam_nf_refcount_entry *e;
++
++		e = rb_entry(n, struct ssam_nf_refcount_entry, node);
++		status = ssam_ssh_event_disable(ctrl, e->key.reg,
++						e->key.id, e->flags);
++		if (status)
++			goto err;
++	}
++	mutex_unlock(&nf->lock);
++
++	return 0;
++
++err:
++	for (n = rb_prev(n); n != NULL; n = rb_prev(n)) {
++		struct ssam_nf_refcount_entry *e;
++
++		e = rb_entry(n, struct ssam_nf_refcount_entry, node);
++		ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
++	}
++	mutex_unlock(&nf->lock);
++
++	return status;
++}
++
++/**
++ * ssam_notifier_restore_registered() - Restore/re-enable events for all
++ * registered notifiers.
++ * @ctrl: The controller for which to restore the notifiers/events.
++ *
++ * Restores/re-enables all events for which notifiers have been registered on
++ * the given controller. In case of a failure, the error is logged and the
++ * function continues to try and enable the remaining events.
++ *
++ * This function is intended to restore/re-enable all registered events after
++ * hibernation. See ssam_notifier_disable_registered() for the counterpart
++ * disabling the events and more details.
++ */
++void ssam_notifier_restore_registered(struct ssam_controller *ctrl)
++{
++	struct ssam_nf *nf = &ctrl->cplt.event.notif;
++	struct rb_node *n;
++
++	mutex_lock(&nf->lock);
++	for (n = rb_first(&nf->refcount); n != NULL; n = rb_next(n)) {
++		struct ssam_nf_refcount_entry *e;
++
++		e = rb_entry(n, struct ssam_nf_refcount_entry, node);
++
++		// ignore errors, will get logged in call
++		ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
++	}
++	mutex_unlock(&nf->lock);
++}
++
++/**
++ * ssam_notifier_empty() - Check if there are any registered notifiers.
++ * @ctrl: The controller to check on.
++ *
++ * Return: Returns %true if there are currently no notifiers registered on the
++ * controller, %false otherwise.
++ */
++static bool ssam_notifier_empty(struct ssam_controller *ctrl)
++{
++	struct ssam_nf *nf = &ctrl->cplt.event.notif;
++	bool result;
++
++	mutex_lock(&nf->lock);
++	result = ssam_nf_refcount_empty(nf);
++	mutex_unlock(&nf->lock);
++
++	return result;
++}
++
++/**
++ * ssam_notifier_unregister_all() - Unregister all currently registered
++ * notifiers.
++ * @ctrl: The controller to unregister the notifiers on.
++ *
++ * Unregisters all currently registered notifiers. This function is used to
++ * ensure that all notifiers will be unregistered and associated
++ * entries/resources freed when the controller is being shut down.
++ */
++static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
++{
++	struct ssam_nf *nf = &ctrl->cplt.event.notif;
++	struct ssam_nf_refcount_entry *e, *n;
++
++	mutex_lock(&nf->lock);
++	rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
++		// ignore errors, will get logged in call
++		ssam_ssh_event_disable(ctrl, e->key.reg, e->key.id, e->flags);
++		kfree(e);
++	}
++	nf->refcount = RB_ROOT;
++	mutex_unlock(&nf->lock);
++}
++
++
++/* -- Wakeup IRQ. ----------------------------------------------------------- */
++
++static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
++{
++	struct ssam_controller *ctrl = dev_id;
++
++	ssam_dbg(ctrl, "pm: wake irq triggered\n");
++
++	/*
++	 * Note: Proper wakeup detection is currently unimplemented.
++	 *       When the EC is in display-off or any other non-D0 state, it
++	 *       does not send events/notifications to the host. Instead it
++	 *       signals that there are events available via the wakeup IRQ.
++	 *       This driver is responsible for calling back to the EC to
++	 *       release these events one-by-one.
++	 *
++	 *       This IRQ should not cause a full system resume by its own.
++	 *       Instead, events should be handled by their respective subsystem
++	 *       drivers, which in turn should signal whether a full system
++	 *       resume should be performed.
++	 *
++	 * TODO: Send GPIO callback command repeatedly to EC until callback
++	 *       returns 0x00. Return flag of callback is "has more events".
++	 *       Each time the command is sent, one event is "released". Once
++	 *       all events have been released (return = 0x00), the GPIO is
++	 *       re-armed. Detect wakeup events during this process, go back to
++	 *       sleep if no wakeup event has been received.
++	 */
++
++	return IRQ_HANDLED;
++}
++
++/**
++ * ssam_irq_setup() - Set up SAM EC wakeup-GPIO interrupt.
++ * @ctrl: The controller for which the IRQ should be set up.
++ *
++ * Set up an IRQ for the wakeup-GPIO pin of the SAM EC. This IRQ can be used
++ * to wake the device from a low power state.
++ *
++ * Note that this IRQ can only be triggered while the EC is in the display-off
++ * state. In this state, events are not sent to the host in the usual way.
++ * Instead the wakeup-GPIO gets pulled to "high" as long as there are pending
++ * events and these events need to be released one-by-one via the GPIO
++ * callback request, either until there are no events left and the GPIO is
++ * reset, or all at once by transitioning the EC out of the display-off state,
++ * which will also clear the GPIO.
++ *
++ * Not all events, however, should trigger a full system wakeup. Instead the
++ * driver should, if necessary, inspect and forward each event to the
++ * corresponding subsystem, which in turn should decide if the system needs to
++ * be woken up. This logic has not been implemented yet, thus wakeup by this
++ * IRQ should be disabled by default to avoid spurious wake-ups, caused, for
++ * example, by the remaining battery percentage changing. Refer to comments in
++ * this function and comments in the corresponding IRQ handler for more
++ * details on how this should be implemented.
++ *
++ * See also ssam_ctrl_notif_display_off() and ssam_ctrl_notif_display_on()
++ * for functions to transition the EC into and out of the display-off state as
++ * well as more details on it.
++ *
++ * The IRQ is disabled by default and has to be enabled before it can wake up
++ * the device from suspend via ssam_irq_arm_for_wakeup(). On teardown, the IRQ
++ * should be freed via ssam_irq_free().
++ */
++int ssam_irq_setup(struct ssam_controller *ctrl)
++{
++	struct device *dev = ssam_controller_device(ctrl);
++	struct gpio_desc *gpiod;
++	int irq;
++	int status;
++
++	/*
++	 * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
++	 * However, the GPIO line only gets reset by sending the GPIO callback
++	 * command to SAM (or alternatively the display-on notification). As
++	 * proper handling for this interrupt is not implemented yet, leaving
++	 * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
++	 * never gets sent and thus the line never gets reset). To avoid this,
++	 * mark the IRQ as TRIGGER_RISING for now, only creating a single
++	 * interrupt, and let the SAM resume callback during the controller
++	 * resume process clear it.
++	 */
++	const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
++
++	gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
++	if (IS_ERR(gpiod))
++		return PTR_ERR(gpiod);
++
++	irq = gpiod_to_irq(gpiod);
++	gpiod_put(gpiod);
++
++	if (irq < 0)
++		return irq;
++
++	status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
++				      "ssam_wakeup", ctrl);
++	if (status)
++		return status;
++
++	ctrl->irq.num = irq;
++	disable_irq(ctrl->irq.num);
++	return 0;
++}
++
++/**
++ * ssam_irq_free() - Free SAM EC wakeup-GPIO interrupt.
++ * @ctrl: The controller for which the IRQ should be freed.
++ *
++ * Free the wakeup-GPIO IRQ previously set-up via ssam_irq_setup().
++ */
++void ssam_irq_free(struct ssam_controller *ctrl)
++{
++	free_irq(ctrl->irq.num, ctrl);
++	ctrl->irq.num = -1;
++}
++
++/**
++ * ssam_irq_arm_for_wakeup() - Arm the EC IRQ for wakeup, if enabled.
++ * @ctrl: The controller for which the IRQ should be armed.
++ *
++ * Sets up the IRQ so that it can be used to wake the device. Specifically,
++ * this function enables the irq and then, if the device is allowed to wake up
++ * the system, calls enable_irq_wake(). See ssam_irq_disarm_wakeup() for the
++ * corresponding function to disable the IRQ.
++ *
++ * This function is intended to arm the IRQ before entering S2idle suspend.
++ *
++ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
++ * be balanced.
++ */
++int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl)
++{
++	struct device *dev = ssam_controller_device(ctrl);
++	int status;
++
++	enable_irq(ctrl->irq.num);
++	if (device_may_wakeup(dev)) {
++		status = enable_irq_wake(ctrl->irq.num);
++		if (status) {
++			ssam_err(ctrl, "failed to enable wake IRQ: %d\n", status);
++			disable_irq(ctrl->irq.num);
++			return status;
++		}
++
++		ctrl->irq.wakeup_enabled = true;
++	} else {
++		ctrl->irq.wakeup_enabled = false;
++	}
++
++	return 0;
++}
++
++/**
++ * ssam_irq_disarm_wakeup() - Disarm the wakeup IRQ.
++ * @ctrl: The controller for which the IRQ should be disarmed.
++ *
++ * Disarm the IRQ previously set up for wake via ssam_irq_arm_for_wakeup().
++ *
++ * This function is intended to disarm the IRQ after exiting S2idle suspend.
++ *
++ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
++ * be balanced.
++ */
++void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl)
++{
++	int status;
++
++	if (ctrl->irq.wakeup_enabled) {
++		status = disable_irq_wake(ctrl->irq.num);
++		if (status)
++			ssam_err(ctrl, "failed to disable wake IRQ: %d\n", status);
++
++		ctrl->irq.wakeup_enabled = false;
++	}
++	disable_irq(ctrl->irq.num);
++}
+diff --git a/drivers/misc/surface_aggregator/controller.h b/drivers/misc/surface_aggregator/controller.h
+new file mode 100644
+index 000000000000..96e2b87a25d9
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/controller.h
+@@ -0,0 +1,288 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Main SSAM/SSH controller structure and functionality.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
++#define _SURFACE_AGGREGATOR_CONTROLLER_H
++
++#include <linux/kref.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/rbtree.h>
++#include <linux/rwsem.h>
++#include <linux/serdev.h>
++#include <linux/spinlock.h>
++#include <linux/srcu.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/serial_hub.h>
++
++#include "ssh_request_layer.h"
++
++
++/* -- Safe counters. -------------------------------------------------------- */
++
++/**
++ * struct ssh_seq_counter - Safe counter for SSH sequence IDs.
++ * @value: The current counter value.
++ */
++struct ssh_seq_counter {
++	u8 value;
++};
++
++/**
++ * struct ssh_rqid_counter - Safe counter for SSH request IDs.
++ * @value: The current counter value.
++ */
++struct ssh_rqid_counter {
++	u16 value;
++};
++
++
++/* -- Event/notification system. -------------------------------------------- */
++
++/**
++ * struct ssam_nf_head - Notifier head for SSAM events.
++ * @srcu: The SRCU struct for synchronization.
++ * @head: Head-pointer for the single-linked list of notifier blocks registered
++ *        under this head.
++ */
++struct ssam_nf_head {
++	struct srcu_struct srcu;
++	struct ssam_notifier_block __rcu *head;
++};
++
++/**
++ * struct ssam_nf - Notifier callback- and activation-registry for SSAM events.
++ * @lock:     Lock guarding (de-)registration of notifier blocks. Note: This
++ *            lock does not need to be held for notifier calls, only
++ *            registration and deregistration.
++ * @refcount: The root of the RB-tree used for reference-counting enabled
++ *            events/notifications.
++ * @head:     The list of notifier heads for event/notification callbacks.
++ */
++struct ssam_nf {
++	struct mutex lock;
++	struct rb_root refcount;
++	struct ssam_nf_head head[SSH_NUM_EVENTS];
++};
++
++
++/* -- Event/async request completion system. -------------------------------- */
++
++struct ssam_cplt;
++
++/**
++ * struct ssam_event_item - Struct for event queuing and completion.
++ * @node:     The node in the queue.
++ * @rqid:     The request ID of the event.
++ * @ops:      Instance specific functions.
++ * @ops.free: Callback for freeing this event item.
++ * @event:    Actual event data.
++ */
++struct ssam_event_item {
++	struct list_head node;
++	u16 rqid;
++
++	struct {
++		void (*free)(struct ssam_event_item *event);
++	} ops;
++
++	struct ssam_event event;	// must be last
++};
++
++/**
++ * struct ssam_event_queue - Queue for completing received events.
++ * @cplt: Reference to the completion system on which this queue is active.
++ * @lock: The lock for any operation on the queue.
++ * @head: The list-head of the queue.
++ * @work: The &struct work_struct performing completion work for this queue.
++ */
++struct ssam_event_queue {
++	struct ssam_cplt *cplt;
++
++	spinlock_t lock;
++	struct list_head head;
++	struct work_struct work;
++};
++
++/**
++ * struct ssam_event_target - Set of queues for a single SSH target ID.
++ * @queue: The array of queues, one queue per event ID.
++ */
++struct ssam_event_target {
++	struct ssam_event_queue queue[SSH_NUM_EVENTS];
++};
++
++/**
++ * struct ssam_cplt - SSAM event/async request completion system.
++ * @dev:          The device with which this system is associated. Only used
++ *                for logging.
++ * @wq:           The &struct workqueue_struct on which all completion work
++ *                items are queued.
++ * @event:        Event completion management.
++ * @event.target: Array of &struct ssam_event_target, one for each target.
++ * @event.notif:  Notifier callbacks and event activation reference counting.
++ */
++struct ssam_cplt {
++	struct device *dev;
++	struct workqueue_struct *wq;
++
++	struct {
++		struct ssam_event_target target[SSH_NUM_TARGETS];
++		struct ssam_nf notif;
++	} event;
++};
++
++
++/* -- Main SSAM device structures. ------------------------------------------ */
++
++/**
++ * enum ssam_controller_state - State values for &struct ssam_controller.
++ * @SSAM_CONTROLLER_UNINITIALIZED:
++ *	The controller has not been initialized yet or has been deinitialized.
++ * @SSAM_CONTROLLER_INITIALIZED:
++ *	The controller is initialized, but has not been started yet.
++ * @SSAM_CONTROLLER_STARTED:
++ *	The controller has been started and is ready to use.
++ * @SSAM_CONTROLLER_STOPPED:
++ *	The controller has been stopped.
++ * @SSAM_CONTROLLER_SUSPENDED:
++ *	The controller has been suspended.
++ */
++enum ssam_controller_state {
++	SSAM_CONTROLLER_UNINITIALIZED,
++	SSAM_CONTROLLER_INITIALIZED,
++	SSAM_CONTROLLER_STARTED,
++	SSAM_CONTROLLER_STOPPED,
++	SSAM_CONTROLLER_SUSPENDED,
++};
++
++/**
++ * struct ssam_controller_caps - Controller device capabilities.
++ * @ssh_power_profile:             SSH power profile.
++ * @ssh_buffer_size:               SSH driver UART buffer size.
++ * @screen_on_sleep_idle_timeout:  SAM UART screen-on sleep idle timeout.
++ * @screen_off_sleep_idle_timeout: SAM UART screen-off sleep idle timeout.
++ * @d3_closes_handle:              SAM closes UART handle in D3.
++ *
++ * Controller and SSH device capabilities found in ACPI.
++ */
++struct ssam_controller_caps {
++	u32 ssh_power_profile;
++	u32 ssh_buffer_size;
++	u32 screen_on_sleep_idle_timeout;
++	u32 screen_off_sleep_idle_timeout;
++	u32 d3_closes_handle:1;
++};
++
++/**
++ * struct ssam_controller - SSAM controller device.
++ * @kref:  Reference count of the controller.
++ * @lock:  Main lock for the controller, used to guard state changes.
++ * @state: Controller state.
++ * @rtl:   Request transport layer for SSH I/O.
++ * @cplt:  Completion system for SSH/SSAM events and asynchronous requests.
++ * @counter:      Safe SSH message ID counters.
++ * @counter.seq:  Sequence ID counter.
++ * @counter.rqid: Request ID counter.
++ * @irq:          Wakeup IRQ resources.
++ * @irq.num:      The wakeup IRQ number.
++ * @irq.wakeup_enabled: Whether wakeup by IRQ is enabled during suspend.
++ * @caps: The controller device capabilities.
++ */
++struct ssam_controller {
++	struct kref kref;
++
++	struct rw_semaphore lock;
++	enum ssam_controller_state state;
++
++	struct ssh_rtl rtl;
++	struct ssam_cplt cplt;
++
++	struct {
++		struct ssh_seq_counter seq;
++		struct ssh_rqid_counter rqid;
++	} counter;
++
++	struct {
++		int num;
++		bool wakeup_enabled;
++	} irq;
++
++	struct ssam_controller_caps caps;
++};
++
++#define to_ssam_controller(ptr, member) \
++	container_of(ptr, struct ssam_controller, member)
++
++#define ssam_dbg(ctrl, fmt, ...)  rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
++#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
++#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
++#define ssam_err(ctrl, fmt, ...)  rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
++
++
++/**
++ * ssam_controller_receive_buf() - Provide input-data to the controller.
++ * @ctrl: The controller.
++ * @buf:  The input buffer.
++ * @n:    The number of bytes in the input buffer.
++ *
++ * Provide input data to be evaluated by the controller, which has been
++ * received via the lower-level transport.
++ *
++ * Return: Returns the number of bytes consumed, or, if the packet transport
++ * layer of the controller has been shut down, %-ESHUTDOWN.
++ */
++static inline
++int ssam_controller_receive_buf(struct ssam_controller *ctrl,
++				const unsigned char *buf, size_t n)
++{
++	return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
++}
++
++/**
++ * ssam_controller_write_wakeup() - Notify the controller that the underlying
++ * device has space available for data to be written.
++ * @ctrl: The controller.
++ */
++static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl)
++{
++	ssh_ptl_tx_wakeup(&ctrl->rtl.ptl);
++}
++
++
++int ssam_controller_init(struct ssam_controller *ctrl, struct serdev_device *s);
++int ssam_controller_start(struct ssam_controller *ctrl);
++void ssam_controller_shutdown(struct ssam_controller *ctrl);
++void ssam_controller_destroy(struct ssam_controller *ctrl);
++
++int ssam_notifier_disable_registered(struct ssam_controller *ctrl);
++void ssam_notifier_restore_registered(struct ssam_controller *ctrl);
++
++int ssam_irq_setup(struct ssam_controller *ctrl);
++void ssam_irq_free(struct ssam_controller *ctrl);
++int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl);
++void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl);
++
++void ssam_controller_lock(struct ssam_controller *c);
++void ssam_controller_unlock(struct ssam_controller *c);
++
++int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version);
++int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl);
++int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl);
++int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl);
++int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl);
++
++int ssam_controller_suspend(struct ssam_controller *ctrl);
++int ssam_controller_resume(struct ssam_controller *ctrl);
++
++int ssam_event_item_cache_init(void);
++void ssam_event_item_cache_destroy(void);
++
++#endif /* _SURFACE_AGGREGATOR_CONTROLLER_H */
+diff --git a/drivers/misc/surface_aggregator/core.c b/drivers/misc/surface_aggregator/core.c
+new file mode 100644
+index 000000000000..6aaac82b9091
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/core.c
+@@ -0,0 +1,831 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Surface Serial Hub (SSH) driver for communication with the Surface/System
++ * Aggregator Module (SSAM/SAM).
++ *
++ * Provides access to a SAM-over-SSH connected EC via a controller device.
++ * Handles communication via requests as well as enabling, disabling, and
++ * relaying of events.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/atomic.h>
++#include <linux/completion.h>
++#include <linux/gpio/consumer.h>
++#include <linux/kernel.h>
++#include <linux/kref.h>
++#include <linux/module.h>
++#include <linux/pm.h>
++#include <linux/serdev.h>
++#include <linux/sysfs.h>
++
++#include <linux/surface_aggregator/controller.h>
++
++#include "bus.h"
++#include "controller.h"
++
++#define CREATE_TRACE_POINTS
++#include "trace.h"
++
++
++/* -- Static controller reference. ------------------------------------------ */
++
++/*
++ * Main controller reference. The corresponding lock must be held while
++ * accessing (reading/writing) the reference.
++ */
++static struct ssam_controller *__ssam_controller;
++static DEFINE_SPINLOCK(__ssam_controller_lock);
++
++/**
++ * ssam_get_controller() - Get reference to SSAM controller.
++ *
++ * Returns a reference to the SSAM controller of the system or %NULL if there
++ * is none, it hasn't been set up yet, or it has already been unregistered.
++ * This function automatically increments the reference count of the
++ * controller, thus the calling party must ensure that ssam_controller_put()
++ * is called when it doesn't need the controller any more.
++ */
++struct ssam_controller *ssam_get_controller(void)
++{
++	struct ssam_controller *ctrl;
++
++	spin_lock(&__ssam_controller_lock);
++
++	ctrl = __ssam_controller;
++	if (!ctrl)
++		goto out;
++
++	if (WARN_ON(!kref_get_unless_zero(&ctrl->kref)))
++		ctrl = NULL;
++
++out:
++	spin_unlock(&__ssam_controller_lock);
++	return ctrl;
++}
++EXPORT_SYMBOL_GPL(ssam_get_controller);
++
++/**
++ * ssam_try_set_controller() - Try to set the main controller reference.
++ * @ctrl: The controller to which the reference should point.
++ *
++ * Set the main controller reference to the given pointer if the reference
++ * hasn't been set already.
++ *
++ * Return: Returns zero on success or %-EBUSY if the reference has already
++ * been set.
++ */
++static int ssam_try_set_controller(struct ssam_controller *ctrl)
++{
++	int status = 0;
++
++	spin_lock(&__ssam_controller_lock);
++	if (!__ssam_controller)
++		__ssam_controller = ctrl;
++	else
++		status = -EBUSY;
++	spin_unlock(&__ssam_controller_lock);
++
++	return status;
++}
++
++/**
++ * ssam_clear_controller() - Remove/clear the main controller reference.
++ *
++ * Clears the main controller reference, i.e. sets it to %NULL. This function
++ * should be called before the controller is shut down.
++ */
++static void ssam_clear_controller(void)
++{
++	spin_lock(&__ssam_controller_lock);
++	__ssam_controller = NULL;
++	spin_unlock(&__ssam_controller_lock);
++}
++
++
++/**
++ * ssam_client_link() - Link an arbitrary client device to the controller.
++ * @c: The controller to link to.
++ * @client: The client device.
++ *
++ * Link an arbitrary client device to the controller by creating a device link
++ * between it as consumer and the controller device as provider. This function
++ * can be used for non-SSAM devices (or SSAM devices not registered as child
++ * under the controller) to guarantee that the controller is valid for as long
++ * as the driver of the client device is bound, and that proper suspend and
++ * resume ordering is guaranteed.
++ *
++ * The device link does not have to be destructed manually. It is removed
++ * automatically once the driver of the client device unbinds.
++ *
++ * Return: Returns zero on success, %-ENXIO if the controller is not ready or
++ * going to be removed soon, or %-ENOMEM if the device link could not be
++ * created for other reasons.
++ */
++int ssam_client_link(struct ssam_controller *c, struct device *client)
++{
++	const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
++	struct device_link *link;
++	struct device *ctrldev;
++
++	ssam_controller_statelock(c);
++
++	if (c->state != SSAM_CONTROLLER_STARTED) {
++		ssam_controller_stateunlock(c);
++		return -ENXIO;
++	}
++
++	ctrldev = ssam_controller_device(c);
++	if (!ctrldev) {
++		ssam_controller_stateunlock(c);
++		return -ENXIO;
++	}
++
++	link = device_link_add(client, ctrldev, flags);
++	if (!link) {
++		ssam_controller_stateunlock(c);
++		return -ENOMEM;
++	}
++
++	/*
++	 * Return -ENXIO if supplier driver is on its way to be removed. In this
++	 * case, the controller won't be around for much longer and the device
++	 * link is not going to save us any more, as unbinding is already in
++	 * progress.
++	 */
++	if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
++		ssam_controller_stateunlock(c);
++		return -ENXIO;
++	}
++
++	ssam_controller_stateunlock(c);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ssam_client_link);
++
++/**
++ * ssam_client_bind() - Bind an arbitrary client device to the controller.
++ * @client: The client device.
++ * @ctrl: A pointer to where the controller reference should be returned.
++ *
++ * Link an arbitrary client device to the controller by creating a device link
++ * between it as consumer and the main controller device as provider. This
++ * function can be used for non-SSAM devices to guarantee that the controller
++ * returned by this function is valid for as long as the driver of the client
++ * device is bound, and that proper suspend and resume ordering is guaranteed.
++ *
++ * This function does essentially the same as ssam_client_link(), except that
++ * it first fetches the main controller reference, then creates the link, and
++ * finally returns this reference in the @ctrl parameter. Note that this
++ * function does not increment the reference counter of the controller, as,
++ * due to the link, the controller lifetime is assured as long as the driver
++ * of the client device is bound.
++ *
++ * It is not valid to use the controller reference obtained by this method
++ * outside of the driver bound to the client device at the time of calling
++ * this function, without first incrementing the reference count of the
++ * controller via ssam_controller_get(). Even after doing this, care must be
++ * taken that requests are only submitted and notifiers are only
++ * (un-)registered when the controller is active and not suspended. In other
++ * words: The device link only lives as long as the client driver is bound and
++ * any guarantees enforced by this link (e.g. active controller state) can
++ * only be relied upon as long as this link exists and may need to be enforced
++ * in other ways afterwards.
++ *
++ * The created device link does not have to be destructed manually. It is
++ * removed automatically once the driver of the client device unbinds.
++ *
++ * Return: Returns zero on success, %-ENXIO if the controller is not present,
++ * not ready or going to be removed soon, or %-ENOMEM if the device link could
++ * not be created for other reasons.
++ */
++int ssam_client_bind(struct device *client, struct ssam_controller **ctrl)
++{
++	struct ssam_controller *c;
++	int status;
++
++	c = ssam_get_controller();
++	if (!c)
++		return -ENXIO;
++
++	status = ssam_client_link(c, client);
++
++	/*
++	 * Note that we can drop our controller reference in both success and
++	 * failure cases: On success, we have bound the controller lifetime
++ * inherently to the client driver lifetime, i.e. the controller is
++	 * now guaranteed to outlive the client driver. On failure, we're not
++	 * going to use the controller any more.
++	 */
++	ssam_controller_put(c);
++
++	*ctrl = status == 0 ? c : NULL;
++	return status;
++}
++EXPORT_SYMBOL_GPL(ssam_client_bind);
++
++
++/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
++
++static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
++			    size_t n)
++{
++	struct ssam_controller *ctrl;
++
++	ctrl = serdev_device_get_drvdata(dev);
++	return ssam_controller_receive_buf(ctrl, buf, n);
++}
++
++static void ssam_write_wakeup(struct serdev_device *dev)
++{
++	ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
++}
++
++static const struct serdev_device_ops ssam_serdev_ops = {
++	.receive_buf = ssam_receive_buf,
++	.write_wakeup = ssam_write_wakeup,
++};
++
++
++/* -- SysFS and misc. ------------------------------------------------------- */
++
++static int ssam_log_firmware_version(struct ssam_controller *ctrl)
++{
++	u32 version, a, b, c;
++	int status;
++
++	status = ssam_get_firmware_version(ctrl, &version);
++	if (status)
++		return status;
++
++	a = (version >> 24) & 0xff;
++	b = ((version >> 8) & 0xffff);
++	c = version & 0xff;
++
++	ssam_info(ctrl, "SAM firmware version: %u.%u.%u\n", a, b, c);
++	return 0;
++}
++
++static ssize_t firmware_version_show(struct device *dev,
++				     struct device_attribute *attr, char *buf)
++{
++	struct ssam_controller *ctrl = dev_get_drvdata(dev);
++	u32 version, a, b, c;
++	int status;
++
++	status = ssam_get_firmware_version(ctrl, &version);
++	if (status < 0)
++		return status;
++
++	a = (version >> 24) & 0xff;
++	b = ((version >> 8) & 0xffff);
++	c = version & 0xff;
++
++	return snprintf(buf, PAGE_SIZE - 1, "%u.%u.%u\n", a, b, c);
++}
++static DEVICE_ATTR_RO(firmware_version);
++
++static struct attribute *ssam_sam_attrs[] = {
++	&dev_attr_firmware_version.attr,
++	NULL,
++};
++
++static const struct attribute_group ssam_sam_group = {
++	.name = "sam",
++	.attrs = ssam_sam_attrs,
++};
++
++
++/* -- ACPI based device setup. ---------------------------------------------- */
++
++static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
++						  void *ctx)
++{
++	struct serdev_device *serdev = ctx;
++	struct acpi_resource_common_serialbus *serial;
++	struct acpi_resource_uart_serialbus *uart;
++	bool flow_control;
++	int status = 0;
++
++	if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
++		return AE_OK;
++
++	serial = &rsc->data.common_serial_bus;
++	if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
++		return AE_OK;
++
++	uart = &rsc->data.uart_serial_bus;
++
++	// set up serdev device
++	serdev_device_set_baudrate(serdev, uart->default_baud_rate);
++
++	// serdev currently only supports RTSCTS flow control
++	if (uart->flow_control & (~((u8) ACPI_UART_FLOW_CONTROL_HW))) {
++		dev_warn(&serdev->dev, "setup: unsupported flow control (value: 0x%02x)\n",
++			 uart->flow_control);
++	}
++
++	// set RTSCTS flow control
++	flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
++	serdev_device_set_flow_control(serdev, flow_control);
++
++	// serdev currently only supports NONE/EVEN/ODD parity
++	switch (uart->parity) {
++	case ACPI_UART_PARITY_NONE:
++		status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
++		break;
++	case ACPI_UART_PARITY_EVEN:
++		status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
++		break;
++	case ACPI_UART_PARITY_ODD:
++		status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
++		break;
++	default:
++		dev_warn(&serdev->dev, "setup: unsupported parity (value: 0x%02x)\n",
++			 uart->parity);
++		break;
++	}
++
++	if (status) {
++		dev_err(&serdev->dev, "setup: failed to set parity (value: 0x%02x,"
++			" error: %d)\n", uart->parity, status);
++		return AE_ERROR;
++	}
++
++	return AE_CTRL_TERMINATE;       // we've found the resource and are done
++}
++
++static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
++					      struct serdev_device *serdev)
++{
++	return acpi_walk_resources(handle, METHOD_NAME__CRS,
++				   ssam_serdev_setup_via_acpi_crs, serdev);
++}
++
++
++/* -- Power management. ----------------------------------------------------- */
++
++static void ssam_serial_hub_shutdown(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Try to disable notifiers, signal display-off and D0-exit, ignore any
++	 * errors.
++	 *
++	 * Note: It has not been established yet if this is actually
++	 * necessary/useful for shutdown.
++	 */
++
++	status = ssam_notifier_disable_registered(c);
++	if (status) {
++		ssam_err(c, "pm: failed to disable notifiers for shutdown: %d\n",
++			 status);
++	}
++
++	status = ssam_ctrl_notif_display_off(c);
++	if (status)
++		ssam_err(c, "pm: display-off notification failed: %d\n", status);
++
++	status = ssam_ctrl_notif_d0_exit(c);
++	if (status)
++		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++}
++
++#ifdef CONFIG_PM_SLEEP
++
++static int ssam_serial_hub_pm_prepare(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Try to signal display-off. This will quiesce events.
++	 *
++	 * Note: Signalling display-off/display-on should normally be done from
++	 * some sort of display state notifier. As that is not available, signal
++	 * it here.
++	 */
++
++	status = ssam_ctrl_notif_display_off(c);
++	if (status)
++		ssam_err(c, "pm: display-off notification failed: %d\n", status);
++
++	return status;
++}
++
++static void ssam_serial_hub_pm_complete(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Try to signal display-on. This will restore events.
++	 *
++	 * Note: Signalling display-off/display-on should normally be done from
++	 * some sort of display state notifier. As that is not available, signal
++	 * it here.
++	 */
++
++	status = ssam_ctrl_notif_display_on(c);
++	if (status)
++		ssam_err(c, "pm: display-on notification failed: %d\n", status);
++}
++
++static int ssam_serial_hub_pm_suspend(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Try to signal D0-exit, enable IRQ wakeup if specified. Abort on
++	 * error.
++	 */
++
++	status = ssam_ctrl_notif_d0_exit(c);
++	if (status) {
++		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++		goto err_notif;
++	}
++
++	status = ssam_irq_arm_for_wakeup(c);
++	if (status)
++		goto err_irq;
++
++	WARN_ON(ssam_controller_suspend(c));
++	return 0;
++
++err_irq:
++	ssam_ctrl_notif_d0_entry(c);
++err_notif:
++	ssam_ctrl_notif_display_on(c);
++	return status;
++}
++
++static int ssam_serial_hub_pm_resume(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	WARN_ON(ssam_controller_resume(c));
++
++	/*
++	 * Try to disable IRQ wakeup (if specified) and signal D0-entry. In
++	 * case of errors, log them and try to restore normal operation state
++	 * as far as possible.
++	 *
++	 * Note: Signalling display-off/display-on should normally be done from
++	 * some sort of display state notifier. As that is not available, signal
++	 * it here.
++	 */
++
++	ssam_irq_disarm_wakeup(c);
++
++	status = ssam_ctrl_notif_d0_entry(c);
++	if (status)
++		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
++
++	return 0;
++}
++
++static int ssam_serial_hub_pm_freeze(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * During hibernation image creation, we only have to ensure that the
++	 * EC doesn't send us any events. This is done via the display-off
++	 * and D0-exit notifications. Note that this sets up the wakeup IRQ
++	 * on the EC side, however, we have disabled it by default on our side
++	 * and won't enable it here.
++	 *
++	 * See ssam_serial_hub_pm_poweroff() for more details on the hibernation
++	 * process.
++	 */
++
++	status = ssam_ctrl_notif_d0_exit(c);
++	if (status) {
++		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++		ssam_ctrl_notif_display_on(c);
++		return status;
++	}
++
++	WARN_ON(ssam_controller_suspend(c));
++	return 0;
++}
++
++static int ssam_serial_hub_pm_thaw(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	WARN_ON(ssam_controller_resume(c));
++
++	status = ssam_ctrl_notif_d0_entry(c);
++	if (status)
++		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
++
++	return status;
++}
++
++static int ssam_serial_hub_pm_poweroff(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * When entering hibernation and powering off the system, the EC, at
++	 * least on some models, may disable events. Without us taking care of
++	 * that, this leads to events not being enabled/restored when the
++	 * system resumes from hibernation, resulting in SAM-HID subsystem devices
++	 * (i.e. keyboard, touchpad) not working, AC-plug/AC-unplug events being
++	 * gone, etc.
++	 *
++	 * To avoid these issues, we disable all registered events here (this is
++	 * likely not actually required) and restore them during the drivers PM
++	 * restore callback.
++	 *
++	 * Wakeup from the EC interrupt is not supported during hibernation,
++	 * so don't arm the IRQ here.
++	 */
++
++	status = ssam_notifier_disable_registered(c);
++	if (status) {
++		ssam_err(c, "pm: failed to disable notifiers for hibernation: %d\n",
++			 status);
++		return status;
++	}
++
++	status = ssam_ctrl_notif_d0_exit(c);
++	if (status) {
++		ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++		ssam_notifier_restore_registered(c);
++		return status;
++	}
++
++	WARN_ON(ssam_controller_suspend(c));
++	return 0;
++}
++
++static int ssam_serial_hub_pm_restore(struct device *dev)
++{
++	struct ssam_controller *c = dev_get_drvdata(dev);
++	int status;
++
++	/*
++	 * Ignore but log errors, try to restore state as much as possible in
++	 * case of failures. See ssam_serial_hub_pm_poweroff() for more details on
++	 * the hibernation process.
++	 */
++
++	WARN_ON(ssam_controller_resume(c));
++
++	status = ssam_ctrl_notif_d0_entry(c);
++	if (status)
++		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
++
++	ssam_notifier_restore_registered(c);
++	return 0;
++}
++
++static const struct dev_pm_ops ssam_serial_hub_pm_ops = {
++	.prepare  = ssam_serial_hub_pm_prepare,
++	.complete = ssam_serial_hub_pm_complete,
++	.suspend  = ssam_serial_hub_pm_suspend,
++	.resume   = ssam_serial_hub_pm_resume,
++	.freeze   = ssam_serial_hub_pm_freeze,
++	.thaw     = ssam_serial_hub_pm_thaw,
++	.poweroff = ssam_serial_hub_pm_poweroff,
++	.restore  = ssam_serial_hub_pm_restore,
++};
++
++#else /* CONFIG_PM_SLEEP */
++
++static const struct dev_pm_ops ssam_serial_hub_pm_ops = { };
++
++#endif /* CONFIG_PM_SLEEP */
++
++
++/* -- Device/driver setup. -------------------------------------------------- */
++
++static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
++static const struct acpi_gpio_params gpio_ssam_wakeup     = { 1, 0, false };
++
++static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
++	{ "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
++	{ "ssam_wakeup-gpio",     &gpio_ssam_wakeup,     1 },
++	{ },
++};
++
++static int ssam_serial_hub_probe(struct serdev_device *serdev)
++{
++	struct ssam_controller *ctrl;
++	acpi_handle *ssh = ACPI_HANDLE(&serdev->dev);
++	acpi_status astatus;
++	int status;
++
++	if (gpiod_count(&serdev->dev, NULL) < 0)
++		return -ENODEV;
++
++	status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
++	if (status)
++		return status;
++
++	// allocate controller
++	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
++	if (!ctrl)
++		return -ENOMEM;
++
++	// initialize controller
++	status = ssam_controller_init(ctrl, serdev);
++	if (status)
++		goto err_ctrl_init;
++
++	// set up serdev device
++	serdev_device_set_drvdata(serdev, ctrl);
++	serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
++	status = serdev_device_open(serdev);
++	if (status)
++		goto err_devopen;
++
++	astatus = ssam_serdev_setup_via_acpi(ssh, serdev);
++	if (ACPI_FAILURE(astatus)) {
++		status = -ENXIO;
++		goto err_devinit;
++	}
++
++	// start controller
++	status = ssam_controller_start(ctrl);
++	if (status)
++		goto err_devinit;
++
++	// initial SAM requests: log version, notify default/init power states
++	status = ssam_log_firmware_version(ctrl);
++	if (status)
++		goto err_initrq;
++
++	status = ssam_ctrl_notif_d0_entry(ctrl);
++	if (status)
++		goto err_initrq;
++
++	status = ssam_ctrl_notif_display_on(ctrl);
++	if (status)
++		goto err_initrq;
++
++	status = sysfs_create_group(&serdev->dev.kobj, &ssam_sam_group);
++	if (status)
++		goto err_initrq;
++
++	// setup IRQ
++	status = ssam_irq_setup(ctrl);
++	if (status)
++		goto err_irq;
++
++	// finally, set main controller reference
++	status = ssam_try_set_controller(ctrl);
++	if (WARN_ON(status))	// currently, we're the only provider
++		goto err_mainref;
++
++	/*
++	 * TODO: The EC can wake up the system via the associated GPIO interrupt
++	 *       in multiple situations. One of which is the remaining battery
++	 *       capacity falling below a certain threshold. Normally, we should
++	 *       use the device_init_wakeup function, however, the EC also seems
++	 *       to have other reasons for waking up the system and it seems
++	 *       that Windows has additional checks whether the system should be
++	 *       resumed. In short, this causes some spurious unwanted wake-ups.
++	 *       For now let's thus default power/wakeup to false.
++	 */
++	device_set_wakeup_capable(&serdev->dev, true);
++	acpi_walk_dep_device_list(ssh);
++
++	return 0;
++
++err_mainref:
++	ssam_irq_free(ctrl);
++err_irq:
++	sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
++err_initrq:
++	ssam_controller_shutdown(ctrl);
++err_devinit:
++	serdev_device_close(serdev);
++err_devopen:
++	ssam_controller_destroy(ctrl);
++err_ctrl_init:
++	kfree(ctrl);
++	return status;
++}
++
++static void ssam_serial_hub_remove(struct serdev_device *serdev)
++{
++	struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
++	int status;
++
++	// clear static reference, so that no one else can get a new one
++	ssam_clear_controller();
++
++	ssam_irq_free(ctrl);
++	sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
++	ssam_controller_lock(ctrl);
++
++	// remove all client devices
++	ssam_controller_remove_clients(ctrl);
++
++	// act as if suspending to disable events
++	status = ssam_ctrl_notif_display_off(ctrl);
++	if (status) {
++		dev_err(&serdev->dev, "display-off notification failed: %d\n",
++			status);
++	}
++
++	status = ssam_ctrl_notif_d0_exit(ctrl);
++	if (status) {
++		dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
++			status);
++	}
++
++	// shut down controller and remove serdev device reference from it
++	ssam_controller_shutdown(ctrl);
++
++	// shut down actual transport
++	serdev_device_wait_until_sent(serdev, 0);
++	serdev_device_close(serdev);
++
++	// drop our controller reference
++	ssam_controller_unlock(ctrl);
++	ssam_controller_put(ctrl);
++
++	device_set_wakeup_capable(&serdev->dev, false);
++}
++
++
++static const struct acpi_device_id ssam_serial_hub_match[] = {
++	{ "MSHW0084", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match);
++
++static struct serdev_device_driver ssam_serial_hub = {
++	.probe = ssam_serial_hub_probe,
++	.remove = ssam_serial_hub_remove,
++	.driver = {
++		.name = "surface_serial_hub",
++		.acpi_match_table = ssam_serial_hub_match,
++		.pm = &ssam_serial_hub_pm_ops,
++		.shutdown = ssam_serial_hub_shutdown,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++/* -- Module setup. --------------------------------------------------------- */
++
++static int __init ssam_core_init(void)
++{
++	int status;
++
++	status = ssam_bus_register();
++	if (status)
++		goto err_bus;
++
++	status = ssh_ctrl_packet_cache_init();
++	if (status)
++		goto err_cpkg;
++
++	status = ssam_event_item_cache_init();
++	if (status)
++		goto err_evitem;
++
++	status = serdev_device_driver_register(&ssam_serial_hub);
++	if (status)
++		goto err_register;
++
++	return 0;
++
++err_register:
++	ssam_event_item_cache_destroy();
++err_evitem:
++	ssh_ctrl_packet_cache_destroy();
++err_cpkg:
++	ssam_bus_unregister();
++err_bus:
++	return status;
++}
++module_init(ssam_core_init);
++
++static void __exit ssam_core_exit(void)
++{
++	serdev_device_driver_unregister(&ssam_serial_hub);
++	ssam_event_item_cache_destroy();
++	ssh_ctrl_packet_cache_destroy();
++	ssam_bus_unregister();
++}
++module_exit(ssam_core_exit);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Subsystem and Surface Serial Hub driver for Surface System Aggregator Module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/surface_aggregator/ssh_msgb.h b/drivers/misc/surface_aggregator/ssh_msgb.h
+new file mode 100644
+index 000000000000..7c29e7d7028a
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/ssh_msgb.h
+@@ -0,0 +1,201 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * SSH message builder functions.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
++#define _SURFACE_AGGREGATOR_SSH_MSGB_H
++
++#include <asm/unaligned.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/controller.h>
++#include <linux/surface_aggregator/serial_hub.h>
++
++
++/**
++ * struct msgbuf - Buffer struct to construct SSH messages.
++ * @begin: Pointer to the beginning of the allocated buffer space.
++ * @end:   Pointer to the end (one past last element) of the allocated buffer
++ *         space.
++ * @ptr:   Pointer to the first free element in the buffer.
++ */
++struct msgbuf {
++	u8 *begin;
++	u8 *end;
++	u8 *ptr;
++};
++
++/**
++ * msgb_init() - Initialize the given message buffer struct.
++ * @msgb: The buffer struct to initialize
++ * @ptr:  Pointer to the underlying memory by which the buffer will be backed.
++ * @cap:  Size of the underlying memory.
++ *
++ * Initialize the given message buffer struct using the provided memory as
++ * backing.
++ */
++static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap)
++{
++	msgb->begin = ptr;
++	msgb->end = ptr + cap;
++	msgb->ptr = ptr;
++}
++
++/**
++ * msgb_bytes_used() - Return the current number of bytes used in the buffer.
++ * @msgb: The message buffer.
++ */
++static inline size_t msgb_bytes_used(const struct msgbuf *msgb)
++{
++	return msgb->ptr - msgb->begin;
++}
++
++/**
++ * msgb_push_u16() - Push a u16 value to the buffer.
++ * @msgb:  The message buffer.
++ * @value: The value to push to the buffer.
++ */
++static inline void msgb_push_u16(struct msgbuf *msgb, u16 value)
++{
++	if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end))
++		return;
++
++	put_unaligned_le16(value, msgb->ptr);
++	msgb->ptr += sizeof(u16);
++}
++
++/**
++ * msgb_push_syn() - Push SSH SYN bytes to the buffer.
++ * @msgb: The message buffer.
++ */
++static inline void msgb_push_syn(struct msgbuf *msgb)
++{
++	msgb_push_u16(msgb, SSH_MSG_SYN);
++}
++
++/**
++ * msgb_push_buf() - Push raw data to the buffer.
++ * @msgb: The message buffer.
++ * @buf:  The data to push to the buffer.
++ * @len:  The length of the data to push to the buffer.
++ */
++static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len)
++{
++	msgb->ptr = memcpy(msgb->ptr, buf, len) + len;
++}
++
++/**
++ * msgb_push_crc() - Compute CRC and push it to the buffer.
++ * @msgb: The message buffer.
++ * @buf:  The data for which the CRC should be computed.
++ * @len:  The length of the data for which the CRC should be computed.
++ */
++static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len)
++{
++	msgb_push_u16(msgb, ssh_crc(buf, len));
++}
++
++/**
++ * msgb_push_frame() - Push a SSH message frame header to the buffer.
++ * @msgb: The message buffer
++ * @ty:   The type of the frame.
++ * @len:  The length of the payload of the frame.
++ * @seq:  The sequence ID of the frame/packet.
++ */
++static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq)
++{
++	struct ssh_frame *frame = (struct ssh_frame *)msgb->ptr;
++	const u8 *const begin = msgb->ptr;
++
++	if (WARN_ON(msgb->ptr + sizeof(*frame) > msgb->end))
++		return;
++
++	frame->type = ty;
++	put_unaligned_le16(len, &frame->len);
++	frame->seq  = seq;
++
++	msgb->ptr += sizeof(*frame);
++	msgb_push_crc(msgb, begin, msgb->ptr - begin);
++}
++
++/**
++ * msgb_push_ack() - Push a SSH ACK frame to the buffer.
++ * @msgb: The message buffer
++ * @seq:  The sequence ID of the frame/packet to be ACKed.
++ */
++static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq)
++{
++	// SYN
++	msgb_push_syn(msgb);
++
++	// ACK-type frame + CRC
++	msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq);
++
++	// payload CRC (ACK-type frames do not have a payload)
++	msgb_push_crc(msgb, msgb->ptr, 0);
++}
++
++/**
++ * msgb_push_nak() - Push a SSH NAK frame to the buffer.
++ * @msgb: The message buffer
++ */
++static inline void msgb_push_nak(struct msgbuf *msgb)
++{
++	// SYN
++	msgb_push_syn(msgb);
++
++	// NAK-type frame + CRC
++	msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00);
++
++	// payload CRC (ACK-type frames do not have a payload)
++	msgb_push_crc(msgb, msgb->ptr, 0);
++}
++
++/**
++ * msgb_push_cmd() - Push a SSH command frame with payload to the buffer.
++ * @msgb: The message buffer.
++ * @seq:  The sequence ID (SEQ) of the frame/packet.
++ * @rqid: The request ID (RQID) of the request contained in the frame.
++ * @rqst: The request to wrap in the frame.
++ */
++static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
++				 const struct ssam_request *rqst)
++{
++	struct ssh_command *cmd;
++	const u8 *cmd_begin;
++	const u8 type = SSH_FRAME_TYPE_DATA_SEQ;
++
++	// SYN
++	msgb_push_syn(msgb);
++
++	// command frame + crc
++	msgb_push_frame(msgb, type, sizeof(*cmd) + rqst->length, seq);
++
++	// frame payload: command struct + payload
++	if (WARN_ON(msgb->ptr + sizeof(*cmd) > msgb->end))
++		return;
++
++	cmd_begin = msgb->ptr;
++	cmd = (struct ssh_command *)msgb->ptr;
++
++	cmd->type    = SSH_PLD_TYPE_CMD;
++	cmd->tc      = rqst->target_category;
++	cmd->tid_out = rqst->target_id;
++	cmd->tid_in  = 0x00;
++	cmd->iid     = rqst->instance_id;
++	put_unaligned_le16(rqid, &cmd->rqid);
++	cmd->cid     = rqst->command_id;
++
++	msgb->ptr += sizeof(*cmd);
++
++	// command payload
++	msgb_push_buf(msgb, rqst->payload, rqst->length);
++
++	// crc for command struct + payload
++	msgb_push_crc(msgb, cmd_begin, msgb->ptr - cmd_begin);
++}
++
++#endif /* _SURFACE_AGGREGATOR_SSH_MSGB_H */
+diff --git a/drivers/misc/surface_aggregator/ssh_packet_layer.c b/drivers/misc/surface_aggregator/ssh_packet_layer.c
+new file mode 100644
+index 000000000000..2d72e8c02842
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/ssh_packet_layer.c
+@@ -0,0 +1,2009 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * SSH packet transport layer.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/atomic.h>
++#include <linux/error-injection.h>
++#include <linux/jiffies.h>
++#include <linux/kfifo.h>
++#include <linux/kref.h>
++#include <linux/kthread.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/serdev.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++
++#include "ssh_msgb.h"
++#include "ssh_packet_layer.h"
++#include "ssh_parser.h"
++
++#include "trace.h"
++
++
++/*
++ * To simplify reasoning about the code below, we define a few concepts. The
++ * system below is similar to a state-machine for packets, however, there are
++ * too many states to explicitly write them down. To (somewhat) manage the
++ * states and packets we rely on flags, reference counting, and some simple
++ * concepts. State transitions are triggered by actions.
++ *
++ * >> Actions <<
++ *
++ * - submit
++ * - transmission start (process next item in queue)
++ * - transmission finished (guaranteed to never be parallel to transmission
++ *   start)
++ * - ACK received
++ * - NAK received (this is equivalent to issuing re-submit for all pending
++ *   packets)
++ * - timeout (this is equivalent to re-issuing a submit or canceling)
++ * - cancel (non-pending and pending)
++ *
++ * >> Data Structures, Packet Ownership, General Overview <<
++ *
++ * The code below employs two main data structures: The packet queue,
++ * containing all packets scheduled for transmission, and the set of pending
++ * packets, containing all packets awaiting an ACK.
++ *
++ * Shared ownership of a packet is controlled via reference counting. Inside
++ * the transport system are a total of five packet owners:
++ *
++ * - the packet queue,
++ * - the pending set,
++ * - the transmitter thread,
++ * - the receiver thread (via ACKing), and
++ * - the timeout work item.
++ *
++ * Normal operation is as follows: The initial reference of the packet is
++ * obtained by submitting the packet and queueing it. The receiver thread
++ * takes packets from the queue. By doing this, it does not increment the
++ * refcount but takes over the reference (removing it from the queue). If the
++ * packet is sequenced (i.e. needs to be ACKed by the client), the transmitter
++ * thread sets-up the timeout and adds the packet to the pending set before
++ * starting to transmit it. As the timeout is handled by a reaper task, no
++ * additional reference for it is needed. After the transmit is done, the
++ * reference held by the transmitter thread is dropped. If the packet is
++ * unsequenced (i.e. does not need an ACK), the packet is completed by the
++ * transmitter thread before dropping that reference.
++ *
++ * On receival of an ACK, the receiver thread removes and obtains the
++ * reference to the packet from the pending set. The receiver thread will then
++ * complete the packet and drop its reference.
++ *
++ * On receival of a NAK, the receiver thread re-submits all currently pending
++ * packets.
++ *
++ * Packet timeouts are detected by the timeout reaper. This is a task,
++ * scheduled depending on the earliest packet timeout expiration date,
++ * checking all currently pending packets if their timeout has expired. If the
++ * timeout of a packet has expired, it is re-submitted and the number of tries
++ * of this packet is incremented. If this number reaches its limit, the packet
++ * will be completed with a failure.
++ *
++ * On transmission failure (such as repeated packet timeouts), the completion
++ * callback is immediately run by on thread on which the error was detected.
++ *
++ * To ensure that a packet eventually leaves the system it is marked as
++ * "locked" directly before it is going to be completed or when it is
++ * canceled. Marking a packet as "locked" has the effect that passing and
++ * creating new references of the packet is disallowed. This means that the
++ * packet cannot be added to the queue, the pending set, and the timeout, or
++ * be picked up by the transmitter thread or receiver thread. To remove a
++ * packet from the system it has to be marked as locked and subsequently all
++ * references from the data structures (queue, pending) have to be removed.
++ * References held by threads will eventually be dropped automatically as
++ * their execution progresses.
++ *
++ * Note that the packet completion callback is, in case of success and for a
++ * sequenced packet, guaranteed to run on the receiver thread, thus providing
++ * a way to reliably identify responses to the packet. The packet completion
++ * callback is only run once and it does not indicate that the packet has
++ * fully left the system (for this, one should rely on the release method,
++ * triggered when the reference count of the packet reaches zero). In case of
++ * re-submission (and with somewhat unlikely timing), it may be possible that
++ * the packet is being re-transmitted while the completion callback runs.
++ * Completion will occur both on success and internal error, as well as when
++ * the packet is canceled.
++ *
++ * >> Flags <<
++ *
++ * Flags are used to indicate the state and progression of a packet. Some flags
++ * have stricter guarantees than others:
++ *
++ * - locked
++ *   Indicates if the packet is locked. If the packet is locked, passing and/or
++ *   creating additional references to the packet is forbidden. The packet thus
++ *   may not be queued, dequeued, or removed or added to the pending set. Note
++ *   that the packet state flags may still change (e.g. it may be marked as
++ *   ACKed, transmitted, ...).
++ *
++ * - completed
++ *   Indicates if the packet completion callback has been executed or is about
++ *   to be executed. This flag is used to ensure that the packet completion
++ *   callback is only run once.
++ *
++ * - queued
++ *   Indicates if a packet is present in the submission queue or not. This flag
++ *   must only be modified with the queue lock held, and must be coherent to the
++ *   presence of the packet in the queue.
++ *
++ * - pending
++ *   Indicates if a packet is present in the set of pending packets or not.
++ *   This flag must only be modified with the pending lock held, and must be
++ *   coherent to the presence of the packet in the pending set.
++ *
++ * - transmitting
++ *   Indicates if the packet is currently transmitting. In case of
++ *   re-transmissions, it is only safe to wait on the "transmitted" completion
++ *   after this flag has been set. The completion will be set both in success
++ *   and error case.
++ *
++ * - transmitted
++ *   Indicates if the packet has been transmitted. This flag is not cleared by
++ *   the system, thus it indicates the first transmission only.
++ *
++ * - acked
++ *   Indicates if the packet has been acknowledged by the client. There are no
++ *   other guarantees given. For example, the packet may still be canceled
++ *   and/or the completion may be triggered with an error even though this
++ *   bit is set. Rely on the status provided to the completion callback instead.
++ *
++ * - canceled
++ *   Indicates if the packet has been canceled from the outside. There are no
++ *   other guarantees given. Specifically, the packet may be completed by
++ *   another part of the system before the cancellation attempts to complete it.
++ *
++ * >> General Notes <<
++ *
++ * To avoid deadlocks, if both queue and pending locks are required, the
++ * pending lock must be acquired before the queue lock.
++ */
++
++/*
++ * SSH_PTL_MAX_PACKET_TRIES - Maximum transmission attempts for packet.
++ *
++ * Maximum number of transmission attempts per sequenced packet in case of
++ * time-outs. Must be smaller than 16. If the packet times out after this
++ * amount of tries, the packet will be completed with %-ETIMEDOUT as status
++ * code.
++ */
++#define SSH_PTL_MAX_PACKET_TRIES		3
++
++/*
++ * SSH_PTL_PACKET_TIMEOUT - Packet timeout.
++ *
++ * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
++ * time-frame after starting transmission, the packet will be re-submitted.
++ */
++#define SSH_PTL_PACKET_TIMEOUT			ms_to_ktime(1000)
++
++/*
++ * SSH_PTL_PACKET_TIMEOUT_RESOLUTION - Packet timeout granularity.
++ *
++ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
++ * direct re-scheduling of reaper work_struct.
++ */
++#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION	ms_to_ktime(max(2000 / HZ, 50))
++
++/*
++ * SSH_PTL_MAX_PENDING - Maximum number of pending packets.
++ *
++ * Maximum number of sequenced packets concurrently waiting for an ACK.
++ * Packets marked as blocking will not be transmitted while this limit is
++ * reached.
++ */
++#define SSH_PTL_MAX_PENDING			1
++
++/*
++ * SSH_PTL_RX_BUF_LEN - Evaluation-buffer size in bytes.
++ */
++#define SSH_PTL_RX_BUF_LEN			4096
++
++/*
++ * SSH_PTL_RX_FIFO_LEN - Fifo input-buffer size in bytes.
++ */
++#define SSH_PTL_RX_FIFO_LEN			4096
++
++
++#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
++
++/**
++ * ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets.
++ *
++ * Useful to test detection and handling of automated re-transmits by the EC.
++ * Specifically of packets that the EC considers not-ACKed but the driver
++ * already considers ACKed (due to a dropped ACK). In this case, the EC
++ * re-transmits the packet-to-be-ACKed and the driver should detect it as
++ * duplicate/already handled. Note that the driver should still send an ACK
++ * for the re-transmitted packet.
++ */
++static noinline bool ssh_ptl_should_drop_ack_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
++
++/**
++ * ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets.
++ *
++ * Useful to test/force automated (timeout-based) re-transmit by the EC.
++ * Specifically, packets that have not reached the driver completely/with valid
++ * checksums. Only useful in combination with receipt of (injected) bad data.
++ */
++static noinline bool ssh_ptl_should_drop_nak_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
++
++/**
++ * ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced
++ * data packet.
++ *
++ * Useful to test re-transmit timeout of the driver. If the data packet has not
++ * been ACKed after a certain time, the driver should re-transmit the packet up
++ * to limited number of times defined in SSH_PTL_MAX_PACKET_TRIES.
++ */
++static noinline bool ssh_ptl_should_drop_dsq_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
++
++/**
++ * ssh_ptl_should_fail_write() - Error injection hook to make
++ * serdev_device_write() fail.
++ *
++ * Hook to simulate errors in serdev_device_write when transmitting packets.
++ */
++static noinline int ssh_ptl_should_fail_write(void)
++{
++	return 0;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
++
++/**
++ * ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid
++ * data being sent to the EC.
++ *
++ * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
++ * Causes the packet data to be actively corrupted by overwriting it with
++ * pre-defined values, such that it becomes invalid, causing the EC to respond
++ * with a NAK packet. Useful to test handling of NAK packets received by the
++ * driver.
++ */
++static noinline bool ssh_ptl_should_corrupt_tx_data(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
++
++/**
++ * ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid
++ * data being sent by the EC.
++ *
++ * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and
++ * test handling thereof in the driver.
++ */
++static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
++
++/**
++ * ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid
++ * data being sent by the EC.
++ *
++ * Hook to simulate invalid data/checksum of the message frame and test handling
++ * thereof in the driver.
++ */
++static noinline bool ssh_ptl_should_corrupt_rx_data(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
++
++
++static bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
++{
++	if (likely(!ssh_ptl_should_drop_ack_packet()))
++		return false;
++
++	trace_ssam_ei_tx_drop_ack_packet(packet);
++	ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
++		 packet);
++
++	return true;
++}
++
++static bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
++{
++	if (likely(!ssh_ptl_should_drop_nak_packet()))
++		return false;
++
++	trace_ssam_ei_tx_drop_nak_packet(packet);
++	ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
++		 packet);
++
++	return true;
++}
++
++static bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
++{
++	if (likely(!ssh_ptl_should_drop_dsq_packet()))
++		return false;
++
++	trace_ssam_ei_tx_drop_dsq_packet(packet);
++	ptl_info(packet->ptl,
++		"packet error injection: dropping sequenced data packet %p\n",
++		 packet);
++
++	return true;
++}
++
++static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
++{
++	// ignore packets that don't carry any data (i.e. flush)
++	if (!packet->data.ptr || !packet->data.len)
++		return false;
++
++	switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
++	case SSH_FRAME_TYPE_ACK:
++		return __ssh_ptl_should_drop_ack_packet(packet);
++
++	case SSH_FRAME_TYPE_NAK:
++		return __ssh_ptl_should_drop_nak_packet(packet);
++
++	case SSH_FRAME_TYPE_DATA_SEQ:
++		return __ssh_ptl_should_drop_dsq_packet(packet);
++
++	default:
++		return false;
++	}
++}
++
++static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
++			     const unsigned char *buf, size_t count)
++{
++	int status;
++
++	status = ssh_ptl_should_fail_write();
++	if (unlikely(status)) {
++		trace_ssam_ei_tx_fail_write(packet, status);
++		ptl_info(packet->ptl,
++			 "packet error injection: simulating transmit error %d,"
++			 " packet %p\n", status, packet);
++
++		return status;
++	}
++
++	return serdev_device_write_buf(ptl->serdev, buf, count);
++}
++
++static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
++{
++	// ignore packets that don't carry any data (i.e. flush)
++	if (!packet->data.ptr || !packet->data.len)
++		return;
++
++	// only allow sequenced data packets to be modified
++	if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
++		return;
++
++	if (likely(!ssh_ptl_should_corrupt_tx_data()))
++		return;
++
++	trace_ssam_ei_tx_corrupt_data(packet);
++	ptl_info(packet->ptl,
++		 "packet error injection: simulating invalid transmit data on packet %p\n",
++		 packet);
++
++	/*
++	 * NB: The value 0xb3 has been chosen more or less randomly so that it
++	 * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
++	 * non-trivial (i.e. non-zero, non-0xff).
++	 */
++	memset(packet->data.ptr, 0xb3, packet->data.len);
++}
++
++static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
++					  struct ssam_span *data)
++{
++	struct ssam_span frame;
++
++	// check if there actually is something to corrupt
++	if (!sshp_find_syn(data, &frame))
++		return;
++
++	if (likely(!ssh_ptl_should_corrupt_rx_syn()))
++		return;
++
++	trace_ssam_ei_rx_corrupt_syn("data_length", data->len);
++
++	data->ptr[1] = 0xb3;	// set second byte of SYN to "random" value
++}
++
++static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
++					   struct ssam_span *frame)
++{
++	size_t payload_len, message_len;
++	struct ssh_frame *sshf;
++
++	// ignore incomplete messages, will get handled once it's complete
++	if (frame->len < SSH_MESSAGE_LENGTH(0))
++		return;
++
++	// ignore incomplete messages, part 2
++	payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
++	message_len = SSH_MESSAGE_LENGTH(payload_len);
++	if (frame->len < message_len)
++		return;
++
++	if (likely(!ssh_ptl_should_corrupt_rx_data()))
++		return;
++
++	sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
++	trace_ssam_ei_rx_corrupt_data(sshf);
++
++	/*
++	 * Flip bits in first byte of payload checksum. This is basically
++	 * equivalent to a payload/frame data error without us having to worry
++	 * about (the, arguably pretty small, probability of) accidental
++	 * checksum collisions.
++	 */
++	frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
++}
++
++#else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
++
++static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
++{
++	return false;
++}
++
++static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
++				    struct ssh_packet *packet,
++				    const unsigned char *buf,
++				    size_t count)
++{
++	return serdev_device_write_buf(ptl->serdev, buf, count);
++}
++
++static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
++{
++}
++
++static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
++						 struct ssam_span *data)
++{
++}
++
++static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
++						  struct ssam_span *frame)
++{
++}
++
++#endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
++
++
++static void __ssh_ptl_packet_release(struct kref *kref)
++{
++	struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
++
++	trace_ssam_packet_release(p);
++
++	ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
++	p->ops->release(p);
++}
++
++/**
++ * ssh_packet_get() - Increment reference count of packet.
++ * @packet: The packet to increment the reference count of.
++ *
++ * Increments the reference count of the given packet. See ssh_packet_put()
++ * for the counter-part of this function.
++ *
++ * Return: Returns the packet provided as input.
++ */
++struct ssh_packet *ssh_packet_get(struct ssh_packet *packet)
++{
++	if (packet)
++		kref_get(&packet->refcnt);
++	return packet;
++}
++EXPORT_SYMBOL_GPL(ssh_packet_get);
++
++/**
++ * ssh_packet_put() - Decrement reference count of packet.
++ * @packet: The packet to decrement the reference count of.
++ *
++ * If the reference count reaches zero, the ``release`` callback specified in
++ * the packet's &struct ssh_packet_ops, i.e. ``packet->ops->release``, will be
++ * called.
++ *
++ * See ssh_packet_get() for the counter-part of this function.
++ */
++void ssh_packet_put(struct ssh_packet *packet)
++{
++	if (packet)
++		kref_put(&packet->refcnt, __ssh_ptl_packet_release);
++}
++EXPORT_SYMBOL_GPL(ssh_packet_put);
++
++static u8 ssh_packet_get_seq(struct ssh_packet *packet)
++{
++	return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
++}
++
++
++/**
++ * ssh_packet_init() - Initialize SSH packet.
++ * @packet:   The packet to initialize.
++ * @type:     Type-flags of the packet.
++ * @priority: Priority of the packet. See SSH_PACKET_PRIORITY() for details.
++ * @ops:      Packet operations.
++ *
++ * Initializes the given SSH packet. Sets the transmission buffer pointer to
++ * %NULL and the transmission buffer length to zero. For data-type packets,
++ * this buffer has to be set separately via ssh_packet_set_data() before
++ * submission, and must contain a valid SSH message, i.e. frame with optional
++ * payload of any type.
++ */
++void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
++		     u8 priority, const struct ssh_packet_ops *ops)
++{
++	kref_init(&packet->refcnt);
++
++	packet->ptl = NULL;
++	INIT_LIST_HEAD(&packet->queue_node);
++	INIT_LIST_HEAD(&packet->pending_node);
++
++	packet->state = type & SSH_PACKET_FLAGS_TY_MASK;
++	packet->priority = priority;
++	packet->timestamp = KTIME_MAX;
++
++	packet->data.ptr = NULL;
++	packet->data.len = 0;
++
++	packet->ops = ops;
++}
++
++
++static struct kmem_cache *ssh_ctrl_packet_cache;
++
++/**
++ * ssh_ctrl_packet_cache_init() - Initialize the control packet cache.
++ */
++int ssh_ctrl_packet_cache_init(void)
++{
++	const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
++	const unsigned int align = __alignof__(struct ssh_packet);
++	struct kmem_cache *cache;
++
++	cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
++	if (!cache)
++		return -ENOMEM;
++
++	ssh_ctrl_packet_cache = cache;
++	return 0;
++}
++
++/**
++ * ssh_ctrl_packet_cache_destroy() - Deinitialize the control packet cache.
++ */
++void ssh_ctrl_packet_cache_destroy(void)
++{
++	kmem_cache_destroy(ssh_ctrl_packet_cache);
++	ssh_ctrl_packet_cache = NULL;
++}
++
++/**
++ * ssh_ctrl_packet_alloc() - Allocate packet from control packet cache.
++ * @packet: Where the pointer to the newly allocated packet should be stored.
++ * @buffer: The buffer corresponding to this packet.
++ * @flags:  Flags used for allocation.
++ *
++ * Allocates a packet and corresponding transport buffer from the control
++ * packet cache. Sets the packet's buffer reference to the allocated buffer.
++ * The packet must be freed via ssh_ctrl_packet_free(), which will also free
++ * the corresponding buffer. The corresponding buffer must not be freed
++ * separately. Intended to be used with %ssh_ptl_ctrl_packet_ops as packet
++ * operations.
++ *
++ * Return: Returns zero on success, %-ENOMEM if the allocation failed.
++ */
++static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
++				 struct ssam_span *buffer, gfp_t flags)
++{
++	*packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
++	if (!*packet)
++		return -ENOMEM;
++
++	buffer->ptr = (u8 *)(*packet + 1);
++	buffer->len = SSH_MSG_LEN_CTRL;
++
++	trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
++	return 0;
++}
++
++/**
++ * ssh_ctrl_packet_free() - Free packet allocated from control packet cache.
++ * @p: The packet to free.
++ */
++static void ssh_ctrl_packet_free(struct ssh_packet *p)
++{
++	trace_ssam_ctrl_packet_free(p);
++	kmem_cache_free(ssh_ctrl_packet_cache, p);
++}
++
++static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
++	.complete = NULL,
++	.release = ssh_ctrl_packet_free,
++};
++
++
++static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
++				       ktime_t expires)
++{
++	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
++	ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
++	ktime_t old_exp, old_act;
++
++	// re-adjust / schedule reaper if it is above resolution delta
++	old_act = READ_ONCE(ptl->rtx_timeout.expires);
++	if (ktime_after(aexp, old_act))
++		return;
++
++	do {
++		old_exp = old_act;
++		old_act = cmpxchg64(&ptl->rtx_timeout.expires, old_exp, expires);
++	} while (old_exp != old_act && ktime_before(aexp, old_act));
++
++	// if we updated the reaper expiration, modify work timeout
++	if (old_exp == old_act && old_act != expires)
++		mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
++}
++
++static void ssh_ptl_timeout_start(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++	ktime_t timestamp = ktime_get_coarse_boottime();
++	ktime_t timeout = ptl->rtx_timeout.timeout;
++
++	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
++		return;
++
++	WRITE_ONCE(packet->timestamp, timestamp);
++	/*
++	 * Ensure timestamp is set before starting the reaper. Paired with
++	 * implicit barrier following check on ssh_packet_get_expiration() in
++	 * ssh_ptl_timeout_reap().
++	 */
++	smp_mb__after_atomic();
++
++	ssh_ptl_timeout_reaper_mod(packet->ptl, timestamp, timestamp + timeout);
++}
++
++
++static void ssh_packet_next_try(struct ssh_packet *p)
++{
++	u8 priority = READ_ONCE(p->priority);
++	u8 base = ssh_packet_priority_get_base(priority);
++	u8 try = ssh_packet_priority_get_try(priority);
++
++	WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
++}
++
++/* must be called with queue lock held */
++static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
++{
++	struct list_head *head;
++	u8 priority = READ_ONCE(p->priority);
++
++	/*
++	 * We generally assume that there are fewer control (ACK/NAK) packets and
++	 * re-submitted data packets than there are normal data packets (at least
++	 * in situations in which many packets are queued; if there aren't many
++	 * packets queued, the decision on how to iterate should be basically
++	 * irrelevant; the number of control/data packets is more or less
++	 * limited via the maximum number of pending packets). Thus, when
++	 * inserting a control or re-submitted data packet (determined by its
++	 * priority), we search from front to back. Normal data packets are
++	 * usually queued directly at the tail of the queue, so for those, search
++	 * from back to front.
++	 */
++
++	if (priority > SSH_PACKET_PRIORITY(DATA, 0)) {
++		list_for_each(head, &p->ptl->queue.head) {
++			p = list_entry(head, struct ssh_packet, queue_node);
++
++			if (READ_ONCE(p->priority) < priority)
++				break;
++		}
++	} else {
++		list_for_each_prev(head, &p->ptl->queue.head) {
++			p = list_entry(head, struct ssh_packet, queue_node);
++
++			if (READ_ONCE(p->priority) >= priority) {
++				head = head->next;
++				break;
++			}
++		}
++	}
++
++
++	return head;
++}
++
++/* must be called with queue lock held */
++static int __ssh_ptl_queue_push(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++	struct list_head *head;
++
++
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return -ESHUTDOWN;
++
++	// avoid further transitions when cancelling/completing
++	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
++		return -EINVAL;
++
++	// if this packet has already been queued, do not add it
++	if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state))
++		return -EALREADY;
++
++	head = __ssh_ptl_queue_find_entrypoint(packet);
++
++	list_add_tail(&ssh_packet_get(packet)->queue_node, head);
++	return 0;
++}
++
++static int ssh_ptl_queue_push(struct ssh_packet *packet)
++{
++	int status;
++
++	spin_lock(&packet->ptl->queue.lock);
++	status = __ssh_ptl_queue_push(packet);
++	spin_unlock(&packet->ptl->queue.lock);
++
++	return status;
++}
++
++static void ssh_ptl_queue_remove(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	spin_lock(&ptl->queue.lock);
++
++	if (!test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
++		spin_unlock(&ptl->queue.lock);
++		return;
++	}
++
++	list_del(&packet->queue_node);
++
++	spin_unlock(&ptl->queue.lock);
++	ssh_packet_put(packet);
++}
++
++
++static void ssh_ptl_pending_push(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	spin_lock(&ptl->pending.lock);
++
++	// if we are cancelling/completing this packet, do not add it
++	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
++		spin_unlock(&ptl->pending.lock);
++		return;
++	}
++
++	// in case it is already pending (e.g. re-submission), do not add it
++	if (test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
++		spin_unlock(&ptl->pending.lock);
++		return;
++	}
++
++	atomic_inc(&ptl->pending.count);
++	list_add_tail(&ssh_packet_get(packet)->pending_node, &ptl->pending.head);
++
++	spin_unlock(&ptl->pending.lock);
++}
++
++static void ssh_ptl_pending_remove(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	spin_lock(&ptl->pending.lock);
++
++	if (!test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
++		spin_unlock(&ptl->pending.lock);
++		return;
++	}
++
++	list_del(&packet->pending_node);
++	atomic_dec(&ptl->pending.count);
++
++	spin_unlock(&ptl->pending.lock);
++
++	ssh_packet_put(packet);
++}
++
++
++/* warning: does not check/set "completed" bit */
++static void __ssh_ptl_complete(struct ssh_packet *p, int status)
++{
++	struct ssh_ptl *ptl = READ_ONCE(p->ptl);
++
++	trace_ssam_packet_complete(p, status);
++	ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
++
++	if (p->ops->complete)
++		p->ops->complete(p, status);
++}
++
++static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
++{
++	/*
++	 * A call to this function should in general be preceded by
++	 * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
++	 * packet to the structures it's going to be removed from.
++	 *
++	 * The set_bit call does not need explicit memory barriers as the
++	 * implicit barrier of the test_and_set_bit() call below ensures that the
++	 * flag is visible before we actually attempt to remove the packet.
++	 */
++
++	if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++		return;
++
++	ssh_ptl_queue_remove(p);
++	ssh_ptl_pending_remove(p);
++
++	__ssh_ptl_complete(p, status);
++}
++
++
++static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
++		return !atomic_read(&ptl->pending.count);
++
++	// we can always process non-blocking packets
++	if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
++		return true;
++
++	// if we are already waiting for this packet, send it again
++	if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
++		return true;
++
++	// otherwise: check if we have the capacity to send
++	return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
++}
++
++static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *packet = ERR_PTR(-ENOENT);
++	struct ssh_packet *p, *n;
++
++	spin_lock(&ptl->queue.lock);
++	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
++		/*
++		 * If we are cancelling or completing this packet, ignore it.
++		 * It's going to be removed from this queue shortly.
++		 */
++		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		/*
++		 * Packets should be ordered non-blocking/to-be-resent first.
++		 * If we cannot process this packet, assume that we can't
++		 * process any following packet either and abort.
++		 */
++		if (!ssh_ptl_tx_can_process(p)) {
++			packet = ERR_PTR(-EBUSY);
++			break;
++		}
++
++		/*
++		 * We are allowed to change the state now. Remove it from the
++		 * queue and mark it as being transmitted. Note that we cannot
++		 * add it to the set of pending packets yet, as queue locks must
++		 * always be acquired before packet locks (otherwise we might
++		 * run into a deadlock).
++		 */
++
++		list_del(&p->queue_node);
++
++		set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
++		// ensure that state never gets zero
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
++
++		packet = p;
++		break;
++	}
++	spin_unlock(&ptl->queue.lock);
++
++	return packet;
++}
++
++static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *p;
++
++	p = ssh_ptl_tx_pop(ptl);
++	if (IS_ERR(p))
++		return p;
++
++	if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
++		ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
++		ssh_ptl_pending_push(p);
++		ssh_ptl_timeout_start(p);
++	} else {
++		ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
++	}
++
++	/*
++	 * Update number of tries. This directly influences the priority in case
++	 * the packet is re-submitted (e.g. via timeout/NAK). Note that this is
++	 * the only place where we update the priority in-flight. As this runs
++	 * only on the tx-thread, this read-modify-write procedure is safe.
++	 */
++	ssh_packet_next_try(p);
++
++	return p;
++}
++
++static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
++
++	// transition state to "transmitted"
++	set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
++	// ensure that state never gets zero
++	smp_mb__before_atomic();
++	clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
++
++	// if the packet is unsequenced, we're done: lock and complete
++	if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
++		set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
++		ssh_ptl_remove_and_complete(packet, 0);
++	}
++
++	/*
++	 * Notify that a packet transmission has finished. In general we're only
++	 * waiting for one packet (if any), so wake_up_all should be fine.
++	 */
++	wake_up_all(&ptl->tx.packet_wq);
++}
++
++static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
++{
++	// transmission failure: lock the packet and try to complete it
++	set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
++	// ensure that state never gets zero
++	smp_mb__before_atomic();
++	clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
++
++	ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
++	ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);
++
++	ssh_ptl_remove_and_complete(packet, status);
++
++	/*
++	 * Notify that a packet transmission has finished. In general we're only
++	 * waiting for one packet (if any), so wake_up_all should be fine.
++	 */
++	wake_up_all(&packet->ptl->tx.packet_wq);
++}
++
++static void ssh_ptl_tx_threadfn_wait(struct ssh_ptl *ptl)
++{
++	wait_event_interruptible(ptl->tx.thread_wq,
++		READ_ONCE(ptl->tx.thread_signal) || kthread_should_stop());
++	WRITE_ONCE(ptl->tx.thread_signal, false);
++}
++
++static int ssh_ptl_tx_threadfn(void *data)
++{
++	struct ssh_ptl *ptl = data;
++
++	while (!kthread_should_stop()) {
++		unsigned char *buf;
++		bool drop = false;
++		size_t len = 0;
++		int status = 0;
++
++		// if we don't have a packet, get the next and add it to pending
++		if (IS_ERR_OR_NULL(ptl->tx.packet)) {
++			ptl->tx.packet = ssh_ptl_tx_next(ptl);
++			ptl->tx.offset = 0;
++
++			// if no packet can be processed, we are done
++			if (IS_ERR(ptl->tx.packet)) {
++				ssh_ptl_tx_threadfn_wait(ptl);
++				continue;
++			}
++		}
++
++		// error injection: drop packet to simulate transmission problem
++		if (ptl->tx.offset == 0)
++			drop = ssh_ptl_should_drop_packet(ptl->tx.packet);
++
++		// error injection: simulate invalid packet data
++		if (ptl->tx.offset == 0 && !drop)
++			ssh_ptl_tx_inject_invalid_data(ptl->tx.packet);
++
++		// note: flush-packets don't have any data
++		if (likely(ptl->tx.packet->data.ptr && !drop)) {
++			buf = ptl->tx.packet->data.ptr + ptl->tx.offset;
++			len = ptl->tx.packet->data.len - ptl->tx.offset;
++
++			ptl_dbg(ptl, "tx: sending data (length: %zu)\n", len);
++			print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
++					     buf, len, false);
++
++			status = ssh_ptl_write_buf(ptl, ptl->tx.packet, buf, len);
++		}
++
++		if (status < 0) {
++			// complete packet with error
++			ssh_ptl_tx_compl_error(ptl->tx.packet, status);
++			ssh_packet_put(ptl->tx.packet);
++			ptl->tx.packet = NULL;
++
++		} else if (status == len) {
++			// complete packet and/or mark as transmitted
++			ssh_ptl_tx_compl_success(ptl->tx.packet);
++			ssh_packet_put(ptl->tx.packet);
++			ptl->tx.packet = NULL;
++
++		} else {	// need more buffer space
++			ptl->tx.offset += status;
++			ssh_ptl_tx_threadfn_wait(ptl);
++		}
++	}
++
++	// cancel active packet before we actually stop
++	if (!IS_ERR_OR_NULL(ptl->tx.packet)) {
++		ssh_ptl_tx_compl_error(ptl->tx.packet, -ESHUTDOWN);
++		ssh_packet_put(ptl->tx.packet);
++		ptl->tx.packet = NULL;
++	}
++
++	return 0;
++}
++
++/**
++ * ssh_ptl_tx_wakeup() - Wake up packet transmitter thread.
++ * @ptl: The packet transport layer.
++ *
++ * Wakes up the packet transmitter thread. If the packet transport layer has
++ * been shut down, calls to this function will be ignored.
++ */
++void ssh_ptl_tx_wakeup(struct ssh_ptl *ptl)
++{
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return;
++
++	WRITE_ONCE(ptl->tx.thread_signal, true);
++	/*
++	 * Ensure that the signal is set before we wake the transmitter
++	 * thread to prevent lost updates: If the signal is not set,
++	 * when the thread checks it in ssh_ptl_tx_threadfn_wait(), it
++	 * may go back to sleep.
++	 */
++	smp_mb__after_atomic();
++	wake_up(&ptl->tx.thread_wq);
++}
++
++/**
++ * ssh_ptl_tx_start() - Start packet transmitter thread.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_ptl_tx_start(struct ssh_ptl *ptl)
++{
++	ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl,
++				     "ssam_serial_hub-tx");
++	if (IS_ERR(ptl->tx.thread))
++		return PTR_ERR(ptl->tx.thread);
++
++	return 0;
++}
++
++/**
++ * ssh_ptl_tx_stop() - Stop packet transmitter thread.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
++{
++	int status = 0;
++
++	if (ptl->tx.thread) {
++		status = kthread_stop(ptl->tx.thread);
++		ptl->tx.thread = NULL;
++	}
++
++	return status;
++}
++
++
++static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
++{
++	struct ssh_packet *packet = ERR_PTR(-ENOENT);
++	struct ssh_packet *p, *n;
++
++	spin_lock(&ptl->pending.lock);
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		/*
++		 * We generally expect packets to be in order, so first packet
++		 * to be added to pending is first to be sent, is first to be
++		 * ACKed.
++		 */
++		if (unlikely(ssh_packet_get_seq(p) != seq_id))
++			continue;
++
++		/*
++		 * In case we receive an ACK while handling a transmission
++		 * error completion. The packet will be removed shortly.
++		 */
++		if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
++			packet = ERR_PTR(-EPERM);
++			break;
++		}
++
++		/*
++		 * Mark the packet as ACKed and remove it from pending by
++		 * removing its node and decrementing the pending counter.
++		 */
++		set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
++		// ensure that state never gets zero
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		atomic_dec(&ptl->pending.count);
++		list_del(&p->pending_node);
++		packet = p;
++
++		break;
++	}
++	spin_unlock(&ptl->pending.lock);
++
++	return packet;
++}
++
++static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
++{
++	wait_event(packet->ptl->tx.packet_wq,
++		   test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state)
++		   || test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
++}
++
++static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
++{
++	struct ssh_packet *p;
++	int status = 0;
++
++	p = ssh_ptl_ack_pop(ptl, seq);
++	if (IS_ERR(p)) {
++		if (PTR_ERR(p) == -ENOENT) {
++			/*
++			 * The packet has not been found in the set of pending
++			 * packets.
++			 */
++			ptl_warn(ptl, "ptl: received ACK for non-pending packet\n");
++		} else {
++			/*
++			 * The packet is pending, but we are not allowed to take
++			 * it because it has been locked.
++			 */
++			WARN_ON(PTR_ERR(p) != -EPERM);
++		}
++		return;
++	}
++
++	ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
++
++	/*
++	 * It is possible that the packet has been transmitted, but the state
++	 * has not been updated from "transmitting" to "transmitted" yet.
++	 * In that case, we need to wait for this transition to occur in order
++	 * to determine between success or failure.
++	 */
++	if (test_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state))
++		ssh_ptl_wait_until_transmitted(p);
++
++	/*
++	 * The packet will already be locked in case of a transmission error or
++	 * cancellation. Let the transmitter or cancellation issuer complete the
++	 * packet.
++	 */
++	if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
++		ssh_packet_put(p);
++		return;
++	}
++
++	if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state))) {
++		ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");
++		status = -EREMOTEIO;
++	}
++
++	ssh_ptl_remove_and_complete(p, status);
++	ssh_packet_put(p);
++
++	if (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING)
++		ssh_ptl_tx_wakeup(ptl);
++}
++
++
++/**
++ * ssh_ptl_submit() - Submit a packet to the transport layer.
++ * @ptl: The packet transport layer to submit the packet to.
++ * @p:   The packet to submit.
++ *
++ * Submits a new packet to the transport layer, queuing it to be sent. This
++ * function should not be used for re-submission.
++ *
++ * Return: Returns zero on success, %-EINVAL if a packet field is invalid or
++ * the packet has been canceled prior to submission, %-EALREADY if the packet
++ * has already been submitted, or %-ESHUTDOWN if the packet transport layer
++ * has been shut down.
++ */
++int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
++{
++	struct ssh_ptl *ptl_old;
++	int status;
++
++	trace_ssam_packet_submit(p);
++
++	// validate packet fields
++	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
++		if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
++			return -EINVAL;
++	} else if (!p->data.ptr) {
++		return -EINVAL;
++	}
++
++	/*
++	 * The ptl reference only gets set on or before the first submission.
++	 * After the first submission, it has to be read-only.
++	 */
++	ptl_old = READ_ONCE(p->ptl);
++	if (ptl_old == NULL)
++		WRITE_ONCE(p->ptl, ptl);
++	else if (WARN_ON(ptl_old != ptl))
++		return -EALREADY;	// submitted on different PTL
++
++	status = ssh_ptl_queue_push(p);
++	if (status)
++		return status;
++
++	if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state)
++	    || (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING))
++		ssh_ptl_tx_wakeup(ptl);
++
++	return 0;
++}
++
++/* must be called with pending lock held */
++static int __ssh_ptl_resubmit(struct ssh_packet *packet)
++{
++	int status;
++
++	trace_ssam_packet_resubmit(packet);
++
++	spin_lock(&packet->ptl->queue.lock);
++
++	status = __ssh_ptl_queue_push(packet);
++	if (status) {
++		/*
++		 * An error here indicates that the packet has either already
++		 * been queued, been locked, or the transport layer is being
++		 * shut down. In all cases: Ignore the error.
++		 */
++		spin_unlock(&packet->ptl->queue.lock);
++		return status;
++	}
++
++	/*
++	 * Reset the timestamp. This must be called and executed before the
++	 * pending lock is released. The lock release should be a sufficient
++	 * barrier for this operation, thus there is no need to manually add
++	 * one here.
++	 */
++	WRITE_ONCE(packet->timestamp, KTIME_MAX);
++
++	spin_unlock(&packet->ptl->queue.lock);
++	return 0;
++}
++
++static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *p;
++	bool resub = false;
++	u8 try;
++
++	/*
++	 * Note: We deliberately do not remove/attempt to cancel and complete
++	 * packets that are out of tries in this function. The packet will be
++	 * eventually canceled and completed by the timeout. Removing the packet
++	 * here could lead to overly eager cancellation if the packet has not
++	 * been re-transmitted yet but the tries-counter already updated (i.e.
++	 * ssh_ptl_tx_next() removed the packet from the queue and updated the
++	 * counter, but re-transmission for the last try has not actually
++	 * started yet).
++	 */
++
++	spin_lock(&ptl->pending.lock);
++
++	// re-queue all pending packets
++	list_for_each_entry(p, &ptl->pending.head, pending_node) {
++		// avoid further transitions if locked
++		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		// do not re-schedule if packet is out of tries
++		try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
++		if (try >= SSH_PTL_MAX_PACKET_TRIES)
++			continue;
++
++		/*
++		 * Submission fails if the packet has been locked, is already
++		 * queued, or the layer is being shut down. No need to
++		 * re-schedule tx-thread in those cases.
++		 */
++		if (!__ssh_ptl_resubmit(p))
++			resub = true;
++	}
++
++	spin_unlock(&ptl->pending.lock);
++
++	if (resub)
++		ssh_ptl_tx_wakeup(ptl);
++}
++
++/**
++ * ssh_ptl_cancel() - Cancel a packet.
++ * @p: The packet to cancel.
++ *
++ * Cancels a packet. There are no guarantees on when completion and release
++ * callbacks will be called. This may occur during execution of this function
++ * or may occur at any point later.
++ *
++ * Note that it is not guaranteed that the packet will actually be cancelled
++ * if the packet is concurrently completed by another process. The only
++ * guarantee of this function is that the packet will be completed (with
++ * success, failure, or cancellation) and released from the transport layer in
++ * a reasonable time-frame.
++ *
++ * May be called before the packet has been submitted, in which case any later
++ * packet submission fails.
++ */
++void ssh_ptl_cancel(struct ssh_packet *p)
++{
++	if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
++		return;
++
++	trace_ssam_packet_cancel(p);
++
++	/*
++	 * Lock packet and commit with memory barrier. If this packet has
++	 * already been locked, it's going to be removed and completed by
++	 * another party, which should have precedence.
++	 */
++	if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++		return;
++
++	/*
++	 * By marking the packet as locked and employing the implicit memory
++	 * barrier of test_and_set_bit, we have guaranteed that, at this point,
++	 * the packet cannot be added to the queue any more.
++	 *
++	 * In case the packet has never been submitted, packet->ptl is NULL. If
++	 * the packet is currently being submitted, packet->ptl may be NULL or
++	 * non-NULL. Due marking the packet as locked above and committing with
++	 * the memory barrier, we have guaranteed that, if packet->ptl is NULL,
++	 * the packet will never be added to the queue. If packet->ptl is
++	 * non-NULL, we don't have any guarantees.
++	 */
++
++	if (READ_ONCE(p->ptl)) {
++		ssh_ptl_remove_and_complete(p, -ECANCELED);
++
++		if (atomic_read(&p->ptl->pending.count) < SSH_PTL_MAX_PENDING)
++			ssh_ptl_tx_wakeup(p->ptl);
++
++	} else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
++		__ssh_ptl_complete(p, -ECANCELED);
++	}
++}
++
++
++static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
++{
++	ktime_t timestamp = READ_ONCE(p->timestamp);
++
++	if (timestamp != KTIME_MAX)
++		return ktime_add(timestamp, timeout);
++	else
++		return KTIME_MAX;
++}
++
++static void ssh_ptl_timeout_reap(struct work_struct *work)
++{
++	struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
++	struct ssh_packet *p, *n;
++	LIST_HEAD(claimed);
++	ktime_t now = ktime_get_coarse_boottime();
++	ktime_t timeout = ptl->rtx_timeout.timeout;
++	ktime_t next = KTIME_MAX;
++	bool resub = false;
++
++	trace_ssam_ptl_timeout_reap("pending", atomic_read(&ptl->pending.count));
++
++	/*
++	 * Mark reaper as "not pending". This is done before checking any
++	 * packets to avoid lost-update type problems.
++	 */
++	WRITE_ONCE(ptl->rtx_timeout.expires, KTIME_MAX);
++	/*
++	 * Ensure that the reaper is marked as deactivated before we continue
++	 * checking packets to prevent lost-update problems when a packet is
++	 * added to the pending set and ssh_ptl_timeout_reaper_mod is called
++	 * during execution of the part below.
++	 */
++	smp_mb__after_atomic();
++
++	spin_lock(&ptl->pending.lock);
++
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		ktime_t expires = ssh_packet_get_expiration(p, timeout);
++		u8 try;
++
++		/*
++		 * Check if the timeout hasn't expired yet. Find out next
++		 * expiration date to be handled after this run.
++		 */
++		if (ktime_after(expires, now)) {
++			next = ktime_before(expires, next) ? expires : next;
++			continue;
++		}
++
++		// check if we still have some tries left
++		try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
++		if (likely(try < SSH_PTL_MAX_PACKET_TRIES)) {
++			trace_ssam_packet_timeout(p);
++
++			/*
++			 * Submission fails if the packet has been locked, is
++			 * already queued, or the layer is being shut down.
++			 * No need to re-schedule tx-thread in those cases.
++			 */
++			if (!__ssh_ptl_resubmit(p))
++				resub = true;
++
++			continue;
++		}
++
++		// no more tries left: cancel the packet
++
++		// if someone else has locked the packet already, don't use it
++		if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		trace_ssam_packet_timeout(p);
++
++		/*
++		 * We have now marked the packet as locked. Thus it cannot be
++		 * added to the pending list again after we've removed it here.
++		 * We can therefore re-use the pending_node of this packet
++		 * temporarily.
++		 */
++
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		atomic_dec(&ptl->pending.count);
++		list_del(&p->pending_node);
++
++		list_add_tail(&p->pending_node, &claimed);
++	}
++
++	spin_unlock(&ptl->pending.lock);
++
++	// cancel and complete the packet
++	list_for_each_entry_safe(p, n, &claimed, pending_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
++			ssh_ptl_queue_remove(p);
++			__ssh_ptl_complete(p, -ETIMEDOUT);
++		}
++
++		// drop the reference we've obtained by removing it from pending
++		list_del(&p->pending_node);
++		ssh_packet_put(p);
++	}
++
++	// ensure that reaper doesn't run again immediately
++	next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
++	if (next != KTIME_MAX)
++		ssh_ptl_timeout_reaper_mod(ptl, now, next);
++
++	if (resub)
++		ssh_ptl_tx_wakeup(ptl);
++}
++
++
++static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
++{
++	int i;
++
++	// check if SEQ has been seen recently (i.e. packet was re-transmitted)
++	for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
++		if (likely(ptl->rx.blocked.seqs[i] != seq))
++			continue;
++
++		ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
++		return true;
++	}
++
++	// update list of blocked sequence IDs
++	ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
++	ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
++				  % ARRAY_SIZE(ptl->rx.blocked.seqs);
++
++	return false;
++}
++
++static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
++				 const struct ssh_frame *frame,
++				 const struct ssam_span *payload)
++{
++	if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
++		return;
++
++	ptl->ops.data_received(ptl, payload);
++}
++
++static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
++{
++	struct ssh_packet *packet;
++	struct ssam_span buf;
++	struct msgbuf msgb;
++	int status;
++
++	status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
++	if (status) {
++		ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
++		return;
++	}
++
++	ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
++			&ssh_ptl_ctrl_packet_ops);
++
++	msgb_init(&msgb, buf.ptr, buf.len);
++	msgb_push_ack(&msgb, seq);
++	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
++
++	ssh_ptl_submit(ptl, packet);
++	ssh_packet_put(packet);
++}
++
++static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *packet;
++	struct ssam_span buf;
++	struct msgbuf msgb;
++	int status;
++
++	status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
++	if (status) {
++		ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
++		return;
++	}
++
++	ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(NAK, 0),
++			&ssh_ptl_ctrl_packet_ops);
++
++	msgb_init(&msgb, buf.ptr, buf.len);
++	msgb_push_nak(&msgb);
++	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
++
++	ssh_ptl_submit(ptl, packet);
++	ssh_packet_put(packet);
++}
++
++static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
++{
++	struct ssh_frame *frame;
++	struct ssam_span payload;
++	struct ssam_span aligned;
++	bool syn_found;
++	int status;
++
++	// error injection: modify data to simulate corrupt SYN bytes
++	ssh_ptl_rx_inject_invalid_syn(ptl, source);
++
++	// find SYN
++	syn_found = sshp_find_syn(source, &aligned);
++
++	if (unlikely(aligned.ptr - source->ptr) > 0) {
++		ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
++
++		/*
++		 * Notes:
++		 * - This might send multiple NAKs in case the communication
++		 *   starts with an invalid SYN and is broken down into multiple
++		 *   pieces. This should generally be handled fine, we just
++		 *   might receive duplicate data in this case, which is
++		 *   detected when handling data frames.
++		 * - This path will also be executed on invalid CRCs: When an
++		 *   invalid CRC is encountered, the code below will skip data
++		 *   until directly after the SYN. This causes the search for
++		 *   the next SYN, which is generally not placed directly after
++		 *   the last one.
++		 *
++		 *   Open question: Should we send this in case of invalid
++		 *   payload CRCs if the frame-type is nonsequential (current
++		 *   implementation) or should we drop that frame without
++		 *   telling the EC?
++		 */
++		ssh_ptl_send_nak(ptl);
++	}
++
++	if (unlikely(!syn_found))
++		return aligned.ptr - source->ptr;
++
++	// error injection: modify data to simulate corruption
++	ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
++
++	// parse and validate frame
++	status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
++				  SSH_PTL_RX_BUF_LEN);
++	if (status)	// invalid frame: skip to next syn
++		return aligned.ptr - source->ptr + sizeof(u16);
++	if (!frame)	// not enough data
++		return aligned.ptr - source->ptr;
++
++	trace_ssam_rx_frame_received(frame);
++
++	switch (frame->type) {
++	case SSH_FRAME_TYPE_ACK:
++		ssh_ptl_acknowledge(ptl, frame->seq);
++		break;
++
++	case SSH_FRAME_TYPE_NAK:
++		ssh_ptl_resubmit_pending(ptl);
++		break;
++
++	case SSH_FRAME_TYPE_DATA_SEQ:
++		ssh_ptl_send_ack(ptl, frame->seq);
++		fallthrough;
++
++	case SSH_FRAME_TYPE_DATA_NSQ:
++		ssh_ptl_rx_dataframe(ptl, frame, &payload);
++		break;
++
++	default:
++		ptl_warn(ptl, "ptl: received frame with unknown type 0x%02x\n",
++			 frame->type);
++		break;
++	}
++
++	return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(frame->len);
++}
++
++static int ssh_ptl_rx_threadfn(void *data)
++{
++	struct ssh_ptl *ptl = data;
++
++	while (true) {
++		struct ssam_span span;
++		size_t offs = 0;
++		size_t n;
++
++		wait_event_interruptible(ptl->rx.wq,
++					 !kfifo_is_empty(&ptl->rx.fifo)
++					 || kthread_should_stop());
++		if (kthread_should_stop())
++			break;
++
++		// copy from fifo to evaluation buffer
++		n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
++
++		ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
++		print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
++				     ptl->rx.buf.ptr + ptl->rx.buf.len - n,
++				     n, false);
++
++		// parse until we need more bytes or buffer is empty
++		while (offs < ptl->rx.buf.len) {
++			sshp_buf_span_from(&ptl->rx.buf, offs, &span);
++			n = ssh_ptl_rx_eval(ptl, &span);
++			if (n == 0)
++				break;	// need more bytes
++
++			offs += n;
++		}
++
++		// throw away the evaluated parts
++		sshp_buf_drop(&ptl->rx.buf, offs);
++	}
++
++	return 0;
++}
++
++static void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
++{
++	wake_up(&ptl->rx.wq);
++}
++
++/**
++ * ssh_ptl_rx_start() - Start packet transport layer receiver thread.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_ptl_rx_start(struct ssh_ptl *ptl)
++{
++	if (ptl->rx.thread)
++		return 0;
++
++	ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
++				     "ssam_serial_hub-rx");
++	if (IS_ERR(ptl->rx.thread))
++		return PTR_ERR(ptl->rx.thread);
++
++	return 0;
++}
++
++/**
++ * ssh_ptl_rx_stop() - Stop packet transport layer receiver thread.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
++{
++	int status = 0;
++
++	if (ptl->rx.thread) {
++		status = kthread_stop(ptl->rx.thread);
++		ptl->rx.thread = NULL;
++	}
++
++	return status;
++}
++
++/**
++ * ssh_ptl_rx_rcvbuf() - Push data from lower-layer transport to the packet
++ * layer.
++ * @ptl: The packet transport layer.
++ * @buf: Pointer to the data to push to the layer.
++ * @n:   Size of the data to push to the layer, in bytes.
++ *
++ * Pushes data from a lower-layer transport to the receiver fifo buffer of the
++ * packet layer and notifies the receiver thread. Calls to this function are
++ * ignored once the packet layer has been shut down.
++ *
++ * Return: Returns the number of bytes transferred (positive or zero) on
++ * success. Returns %-ESHUTDOWN if the packet layer has been shut down.
++ */
++int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
++{
++	int used;
++
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return -ESHUTDOWN;
++
++	used = kfifo_in(&ptl->rx.fifo, buf, n);
++	if (used)
++		ssh_ptl_rx_wakeup(ptl);
++
++	return used;
++}
++
++
++/**
++ * ssh_ptl_shutdown() - Shut down the packet transport layer.
++ * @ptl: The packet transport layer.
++ *
++ * Shuts down the packet transport layer, removing and canceling all queued
++ * and pending packets. Packets canceled by this operation will be completed
++ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
++ * stopped.
++ *
++ * As a result of this function, the transport layer will be marked as shut
++ * down. Submission of packets after the transport layer has been shut down
++ * will fail with %-ESHUTDOWN.
++ */
++void ssh_ptl_shutdown(struct ssh_ptl *ptl)
++{
++	LIST_HEAD(complete_q);
++	LIST_HEAD(complete_p);
++	struct ssh_packet *p, *n;
++	int status;
++
++	// ensure that no new packets (including ACK/NAK) can be submitted
++	set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
++	/*
++	 * Ensure that the layer gets marked as shut-down before actually
++	 * stopping it. In combination with the check in ssh_ptl_queue_push(),
++	 * this guarantees that no new packets can be added and all already
++	 * queued packets are properly cancelled. In combination with the check
++	 * in ssh_ptl_rx_rcvbuf(), this guarantees that received data is
++	 * properly cut off.
++	 */
++	smp_mb__after_atomic();
++
++	status = ssh_ptl_rx_stop(ptl);
++	if (status)
++		ptl_err(ptl, "ptl: failed to stop receiver thread\n");
++
++	status = ssh_ptl_tx_stop(ptl);
++	if (status)
++		ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
++
++	cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
++
++	/*
++	 * At this point, all threads have been stopped. This means that the
++	 * only references to packets from inside the system are in the queue
++	 * and pending set.
++	 *
++	 * Note: We still need locks here because someone could still be
++	 * cancelling packets.
++	 *
++	 * Note 2: We can re-use queue_node (or pending_node) if we mark the
++	 * packet as locked and then remove it from the queue (or pending set
++	 * respectively). Marking the packet as locked avoids re-queueing
++	 * (which should already be prevented by having stopped the threads...)
++	 * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
++	 * new list via other threads (e.g. cancellation).
++	 *
++	 * Note 3: There may be overlap between complete_p and complete_q.
++	 * This is handled via test_and_set_bit() on the "completed" flag
++	 * (also handles cancellation).
++	 */
++
++	// mark queued packets as locked and move them to complete_q
++	spin_lock(&ptl->queue.lock);
++	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
++		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
++		// ensure that state does not get zero
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
++
++		list_del(&p->queue_node);
++		list_add_tail(&p->queue_node, &complete_q);
++	}
++	spin_unlock(&ptl->queue.lock);
++
++	// mark pending packets as locked and move them to complete_p
++	spin_lock(&ptl->pending.lock);
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
++		// ensure that state does not get zero
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		list_del(&p->pending_node);
++		list_add_tail(&p->pending_node, &complete_q);
++	}
++	atomic_set(&ptl->pending.count, 0);
++	spin_unlock(&ptl->pending.lock);
++
++	// complete and drop packets on complete_q
++	list_for_each_entry(p, &complete_q, queue_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++			__ssh_ptl_complete(p, -ESHUTDOWN);
++
++		ssh_packet_put(p);
++	}
++
++	// complete and drop packets on complete_p
++	list_for_each_entry(p, &complete_p, pending_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++			__ssh_ptl_complete(p, -ESHUTDOWN);
++
++		ssh_packet_put(p);
++	}
++
++	/*
++	 * At this point we have guaranteed that the system doesn't reference
++	 * any packets any more.
++	 */
++}
++
++/**
++ * ssh_ptl_init() - Initialize packet transport layer.
++ * @ptl:    The packet transport layer to initialize.
++ * @serdev: The underlying serial device, i.e. the lower-level transport.
++ * @ops:    Packet layer operations.
++ *
++ * Initializes the given packet transport layer. Transmitter and receiver
++ * threads must be started separately via ssh_ptl_tx_start() and
++ * ssh_ptl_rx_start(), after the packet-layer has been initialized and the
++ * lower-level transport layer has been set up.
++ *
++ * Return: Returns zero on success and a nonzero error code on failure.
++ */
++int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
++		 struct ssh_ptl_ops *ops)
++{
++	int i, status;
++
++	ptl->serdev = serdev;
++	ptl->state = 0;
++
++	spin_lock_init(&ptl->queue.lock);
++	INIT_LIST_HEAD(&ptl->queue.head);
++
++	spin_lock_init(&ptl->pending.lock);
++	INIT_LIST_HEAD(&ptl->pending.head);
++	atomic_set_release(&ptl->pending.count, 0);
++
++	ptl->tx.thread = NULL;
++	ptl->tx.thread_signal = false;
++	ptl->tx.packet = NULL;
++	ptl->tx.offset = 0;
++	init_waitqueue_head(&ptl->tx.thread_wq);
++	init_waitqueue_head(&ptl->tx.packet_wq);
++
++	ptl->rx.thread = NULL;
++	init_waitqueue_head(&ptl->rx.wq);
++
++	ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
++	ptl->rtx_timeout.expires = KTIME_MAX;
++	INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
++
++	ptl->ops = *ops;
++
++	// initialize list of recent/blocked SEQs with invalid sequence IDs
++	for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
++		ptl->rx.blocked.seqs[i] = 0xFFFF;
++	ptl->rx.blocked.offset = 0;
++
++	status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
++	if (status)
++		return status;
++
++	status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
++	if (status)
++		kfifo_free(&ptl->rx.fifo);
++
++	return status;
++}
++
++/**
++ * ssh_ptl_destroy() - Deinitialize packet transport layer.
++ * @ptl: The packet transport layer to deinitialize.
++ *
++ * Deinitializes the given packet transport layer and frees resources
++ * associated with it. If receiver and/or transmitter threads have been
++ * started, the layer must first be shut down via ssh_ptl_shutdown() before
++ * this function can be called.
++ */
++void ssh_ptl_destroy(struct ssh_ptl *ptl)
++{
++	kfifo_free(&ptl->rx.fifo);
++	sshp_buf_free(&ptl->rx.buf);
++}
+diff --git a/drivers/misc/surface_aggregator/ssh_packet_layer.h b/drivers/misc/surface_aggregator/ssh_packet_layer.h
+new file mode 100644
+index 000000000000..f3d8a85389d5
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/ssh_packet_layer.h
+@@ -0,0 +1,175 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * SSH packet transport layer.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
++#define _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
++
++#include <linux/atomic.h>
++#include <linux/kfifo.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/serdev.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++#include "ssh_parser.h"
++
++
++/**
++ * enum ssh_ptl_state_flags - State-flags for &struct ssh_ptl.
++ *
++ * @SSH_PTL_SF_SHUTDOWN_BIT:
++ *	Indicates that the packet transport layer has been shut down or is
++ *	being shut down and should not accept any new packets/data.
++ */
++enum ssh_ptl_state_flags {
++	SSH_PTL_SF_SHUTDOWN_BIT,
++};
++
++/**
++ * struct ssh_ptl_ops - Callback operations for packet transport layer.
++ * @data_received: Function called when a data-packet has been received. Both,
++ *                 the packet layer on which the packet has been received and
++ *                 the packet's payload data are provided to this function.
++ */
++struct ssh_ptl_ops {
++	void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
++};
++
++/**
++ * struct ssh_ptl - SSH packet transport layer.
++ * @serdev:        Serial device providing the underlying data transport.
++ * @state:         State(-flags) of the transport layer.
++ * @queue:         Packet submission queue.
++ * @queue.lock:    Lock for modifying the packet submission queue.
++ * @queue.head:    List-head of the packet submission queue.
++ * @pending:       Set/list of pending packets.
++ * @pending.lock:  Lock for modifying the pending set.
++ * @pending.head:  List-head of the pending set/list.
++ * @pending.count: Number of currently pending packets.
++ * @tx:            Transmitter subsystem.
++ * @tx.thread_signal: Signal notifying transmitter thread of data to be sent.
++ * @tx.thread:     Transmitter thread.
++ * @tx.thread_wq:  Waitqueue-head for transmitter thread.
++ * @tx.packet_wq:  Waitqueue-head for packet transmit completion.
++ * @tx.packet:     Currently sent packet.
++ * @tx.offset:     Data-offset into the packet currently being transmitted.
++ * @rx:            Receiver subsystem.
++ * @rx.thread:     Receiver thread.
++ * @rx.wq:         Waitqueue-head for receiver thread.
++ * @rx.fifo:       Buffer for receiving data/pushing data to receiver thread.
++ * @rx.buf:        Buffer for evaluating data on receiver thread.
++ * @rx.blocked:    List of recent/blocked sequence IDs to detect retransmission.
++ * @rx.blocked.seqs:   Array of blocked sequence IDs.
++ * @rx.blocked.offset: Offset indicating where a new ID should be inserted.
++ * @rtx_timeout:   Retransmission timeout subsystem.
++ * @rtx_timeout.timeout: Timeout interval for retransmission.
++ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
++ * @rtx_timeout.reaper:  Work performing timeout checks and subsequent actions.
++ * @ops:           Packet layer operations.
++ */
++struct ssh_ptl {
++	struct serdev_device *serdev;
++	unsigned long state;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++	} queue;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++		atomic_t count;
++	} pending;
++
++	struct {
++		bool thread_signal;
++		struct task_struct *thread;
++		struct wait_queue_head thread_wq;
++		struct wait_queue_head packet_wq;
++		struct ssh_packet *packet;
++		size_t offset;
++	} tx;
++
++	struct {
++		struct task_struct *thread;
++		struct wait_queue_head wq;
++		struct kfifo fifo;
++		struct sshp_buf buf;
++
++		struct {
++			u16 seqs[8];
++			u16 offset;
++		} blocked;
++	} rx;
++
++	struct {
++		ktime_t timeout;
++		ktime_t expires;
++		struct delayed_work reaper;
++	} rtx_timeout;
++
++	struct ssh_ptl_ops ops;
++};
++
++
++#define __ssam_prcond(func, p, fmt, ...)		\
++	do {						\
++		if ((p))				\
++			func((p), fmt, ##__VA_ARGS__);	\
++	} while (0)
++
++#define ptl_dbg(p, fmt, ...)  dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_err(p, fmt, ...)  dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
++
++#define to_ssh_ptl(ptr, member) \
++	container_of(ptr, struct ssh_ptl, member)
++
++
++int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
++		 struct ssh_ptl_ops *ops);
++
++void ssh_ptl_destroy(struct ssh_ptl *ptl);
++
++/**
++ * ssh_ptl_get_device() - Get device associated with packet transport layer.
++ * @ptl: The packet transport layer.
++ *
++ * Return: Returns the device on which the given packet transport layer builds
++ * upon.
++ */
++static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
++{
++	return ptl->serdev ? &ptl->serdev->dev : NULL;
++}
++
++int ssh_ptl_tx_start(struct ssh_ptl *ptl);
++int ssh_ptl_tx_stop(struct ssh_ptl *ptl);
++int ssh_ptl_rx_start(struct ssh_ptl *ptl);
++int ssh_ptl_rx_stop(struct ssh_ptl *ptl);
++void ssh_ptl_shutdown(struct ssh_ptl *ptl);
++
++int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p);
++void ssh_ptl_cancel(struct ssh_packet *p);
++
++int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
++void ssh_ptl_tx_wakeup(struct ssh_ptl *ptl);
++
++void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
++		     u8 priority, const struct ssh_packet_ops *ops);
++
++int ssh_ctrl_packet_cache_init(void);
++void ssh_ctrl_packet_cache_destroy(void);
++
++#endif /* _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H */
+diff --git a/drivers/misc/surface_aggregator/ssh_parser.c b/drivers/misc/surface_aggregator/ssh_parser.c
+new file mode 100644
+index 000000000000..575cbc039ad7
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/ssh_parser.c
+@@ -0,0 +1,229 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * SSH message parser.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/compiler.h>
++#include <linux/device.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++#include "ssh_parser.h"
++
++
++/**
++ * sshp_validate_crc() - Validate a CRC in raw message data.
++ * @src: The span of data over which the CRC should be computed.
++ * @crc: The pointer to the expected u16 CRC value.
++ *
++ * Computes the CRC of the provided data span (@src), compares it to the CRC
++ * stored at the given address (@crc), and returns the result of this
++ * comparison, i.e. %true iff equal. This function is intended to run on raw
++ * input/message data.
++ *
++ * Return: Returns %true iff the computed CRC matches the stored CRC, %false
++ * otherwise.
++ */
++static bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
++{
++	u16 actual = ssh_crc(src->ptr, src->len);
++	u16 expected = get_unaligned_le16(crc);
++
++	return actual == expected;
++}
++
++/**
++ * sshp_starts_with_syn() - Check if the given data starts with SSH SYN bytes.
++ * @src: The data span to check the start of.
++ */
++static bool sshp_starts_with_syn(const struct ssam_span *src)
++{
++	return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
++}
++
++/**
++ * sshp_find_syn() - Find SSH SYN bytes in the given data span.
++ * @src: The data span to search in.
++ * @rem: The span (output) indicating the remaining data, starting with SSH
++ *       SYN bytes, if found.
++ *
++ * Search for SSH SYN bytes in the given source span. If found, set the @rem
++ * span to the remaining data, starting with the first SYN bytes and capped by
++ * the source span length, and return %true. This function does not copy
++ * any data, but rather only sets pointers to the respective start addresses
++ * and length values.
++ *
++ * If no SSH SYN bytes could be found, set the @rem span to the zero-length
++ * span at the end of the source span and return %false.
++ *
++ * If partial SSH SYN bytes could be found at the end of the source span, set
++ * the @rem span to cover these partial SYN bytes, capped by the end of the
++ * source span, and return %false. This function should then be re-run once
++ * more data is available.
++ *
++ * Return: Returns %true iff a complete SSH SYN sequence could be found,
++ * %false otherwise.
++ */
++bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
++{
++	size_t i;
++
++	for (i = 0; i < src->len - 1; i++) {
++		if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
++			rem->ptr = src->ptr + i;
++			rem->len = src->len - i;
++			return true;
++		}
++	}
++
++	if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
++		rem->ptr = src->ptr + src->len - 1;
++		rem->len = 1;
++		return false;
++	}
++
++	rem->ptr = src->ptr + src->len;
++	rem->len = 0;
++	return false;
++}
++
++/**
++ * sshp_parse_frame() - Parse SSH frame.
++ * @dev: The device used for logging.
++ * @source: The source to parse from.
++ * @frame: The parsed frame (output).
++ * @payload: The parsed payload (output).
++ * @maxlen: The maximum supported message length.
++ *
++ * Parses and validates a SSH frame, including its payload, from the given
++ * source. Sets the provided @frame pointer to the start of the frame and
++ * writes the limits of the frame payload to the provided @payload span
++ * pointer.
++ *
++ * This function does not copy any data, but rather only validates the message
++ * data and sets pointers (and length values) to indicate the respective parts.
++ *
++ * If no complete SSH frame could be found, the frame pointer will be set to
++ * the %NULL pointer and the payload span will be set to the null span (start
++ * pointer %NULL, size zero).
++ *
++ * Return: Returns zero on success or if the frame is incomplete, %-ENOMSG if
++ * the start of the message is invalid, %-EBADMSG if any (frame-header or
++ * payload) CRC is invalid, or %-EMSGSIZE if the SSH message is bigger than
++ * the maximum message length specified in the @maxlen parameter.
++ */
++int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
++		     struct ssh_frame **frame, struct ssam_span *payload,
++		     size_t maxlen)
++{
++	struct ssam_span sf;
++	struct ssam_span sp;
++
++	// initialize output
++	*frame = NULL;
++	payload->ptr = NULL;
++	payload->len = 0;
++
++	if (!sshp_starts_with_syn(source)) {
++		dev_warn(dev, "rx: parser: invalid start of frame\n");
++		return -ENOMSG;
++	}
++
++	// check for minimum packet length
++	if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
++		dev_dbg(dev, "rx: parser: not enough data for frame\n");
++		return 0;
++	}
++
++	// pin down frame
++	sf.ptr = source->ptr + sizeof(u16);
++	sf.len = sizeof(struct ssh_frame);
++
++	// validate frame CRC
++	if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
++		dev_warn(dev, "rx: parser: invalid frame CRC\n");
++		return -EBADMSG;
++	}
++
++	// ensure packet does not exceed maximum length
++	sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
++	if (unlikely(sp.len + SSH_MESSAGE_LENGTH(0) > maxlen)) {
++		dev_warn(dev, "rx: parser: frame too large: %u bytes\n",
++			 ((struct ssh_frame *)sf.ptr)->len);
++		return -EMSGSIZE;
++	}
++
++	// pin down payload
++	sp.ptr = sf.ptr + sf.len + sizeof(u16);
++
++	// check for frame + payload length
++	if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
++		dev_dbg(dev, "rx: parser: not enough data for payload\n");
++		return 0;
++	}
++
++	// validate payload crc
++	if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
++		dev_warn(dev, "rx: parser: invalid payload CRC\n");
++		return -EBADMSG;
++	}
++
++	*frame = (struct ssh_frame *)sf.ptr;
++	*payload = sp;
++
++	dev_dbg(dev, "rx: parser: valid frame found (type: 0x%02x, len: %u)\n",
++		(*frame)->type, (*frame)->len);
++
++	return 0;
++}
++
++/**
++ * sshp_parse_command() - Parse SSH command frame payload.
++ * @dev: The device used for logging.
++ * @source: The source to parse from.
++ * @command: The parsed command (output).
++ * @command_data: The parsed command data/payload (output).
++ *
++ * Parses and validates a SSH command frame payload. Sets the @command pointer
++ * to the command header and the @command_data span to the command data (i.e.
++ * payload of the command). This will result in a zero-length span if the
++ * command does not have any associated data/payload. This function does not
++ * check the frame-payload-type field, which should be checked by the caller
++ * before calling this function.
++ *
++ * The @source parameter should be the complete frame payload, e.g. returned
++ * by the sshp_parse_frame() command.
++ *
++ * This function does not copy any data, but rather only validates the frame
++ * payload data and sets pointers (and length values) to indicate the
++ * respective parts.
++ *
++ * Return: Returns zero on success or %-ENOMSG if @source does not represent a
++ * valid command-type frame payload, i.e. is too short.
++ */
++int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
++		       struct ssh_command **command,
++		       struct ssam_span *command_data)
++{
++	// check for minimum length
++	if (unlikely(source->len < sizeof(struct ssh_command))) {
++		*command = NULL;
++		command_data->ptr = NULL;
++		command_data->len = 0;
++
++		dev_err(dev, "rx: parser: command payload is too short\n");
++		return -ENOMSG;
++	}
++
++	*command = (struct ssh_command *)source->ptr;
++	command_data->ptr = source->ptr + sizeof(struct ssh_command);
++	command_data->len = source->len - sizeof(struct ssh_command);
++
++	dev_dbg(dev, "rx: parser: valid command found (tc: 0x%02x, cid: 0x%02x)\n",
++		(*command)->tc, (*command)->cid);
++
++	return 0;
++}
+diff --git a/drivers/misc/surface_aggregator/ssh_parser.h b/drivers/misc/surface_aggregator/ssh_parser.h
+new file mode 100644
+index 000000000000..71c43ab07bf6
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/ssh_parser.h
+@@ -0,0 +1,157 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * SSH message parser.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
++#define _SURFACE_AGGREGATOR_SSH_PARSER_H
++
++#include <linux/device.h>
++#include <linux/kfifo.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++
++
++/**
++ * struct sshp_buf - Parser buffer for SSH messages.
++ * @ptr: Pointer to the beginning of the buffer.
++ * @len: Number of bytes used in the buffer.
++ * @cap: Maximum capacity of the buffer.
++ */
++struct sshp_buf {
++	u8    *ptr;
++	size_t len;
++	size_t cap;
++};
++
++/**
++ * sshp_buf_init() - Initialize a SSH parser buffer.
++ * @buf: The buffer to initialize.
++ * @ptr: The memory backing the buffer.
++ * @cap: The length of the memory backing the buffer, i.e. its capacity.
++ *
++ * Initializes the buffer with the given memory as backing and set its used
++ * length to zero.
++ */
++static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
++{
++	buf->ptr = ptr;
++	buf->len = 0;
++	buf->cap = cap;
++}
++
++/**
++ * sshp_buf_alloc() - Allocate and initialize a SSH parser buffer.
++ * @buf:   The buffer to initialize/allocate to.
++ * @cap:   The desired capacity of the buffer.
++ * @flags: The flags used for allocating the memory.
++ *
++ * Allocates @cap bytes and initializes the provided buffer struct with the
++ * allocated memory.
++ *
++ * Return: Returns zero on success and %-ENOMEM if allocation failed.
++ */
++static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
++{
++	u8 *ptr;
++
++	ptr = kzalloc(cap, flags);
++	if (!ptr)
++		return -ENOMEM;
++
++	sshp_buf_init(buf, ptr, cap);
++	return 0;
++
++}
++
++/**
++ * sshp_buf_free() - Free a SSH parser buffer.
++ * @buf: The buffer to free.
++ *
++ * Frees a SSH parser buffer by freeing the memory backing it and then
++ * resetting its pointer to %NULL and length and capacity to zero. Intended to
++ * free a buffer previously allocated with sshp_buf_alloc().
++ */
++static inline void sshp_buf_free(struct sshp_buf *buf)
++{
++	kfree(buf->ptr);
++	buf->ptr = NULL;
++	buf->len = 0;
++	buf->cap = 0;
++}
++
++/**
++ * sshp_buf_drop() - Drop data from the beginning of the buffer.
++ * @buf: The buffer to drop data from.
++ * @n:   The number of bytes to drop.
++ *
++ * Drops the first @n bytes from the buffer. Re-aligns any remaining data to
++ * the beginning of the buffer.
++ */
++static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
++{
++	memmove(buf->ptr, buf->ptr + n, buf->len - n);
++	buf->len -= n;
++}
++
++/**
++ * sshp_buf_read_from_fifo() - Transfer data from a fifo to the buffer.
++ * @buf:  The buffer to write the data into.
++ * @fifo: The fifo to read the data from.
++ *
++ * Transfers the data contained in the fifo to the buffer, removing it from
++ * the fifo. This function will try to transfer as much data as possible,
++ * limited either by the remaining space in the buffer or by the number of
++ * bytes available in the fifo.
++ *
++ * Return: Returns the number of bytes transferred.
++ */
++static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
++					     struct kfifo *fifo)
++{
++	size_t n;
++
++	n =  kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
++	buf->len += n;
++
++	return n;
++}
++
++/**
++ * sshp_buf_span_from() - Initialize a span from the given buffer and offset.
++ * @buf:    The buffer to create the span from.
++ * @offset: The offset in the buffer at which the span should start.
++ * @span:   The span to initialize (output).
++ *
++ * Initializes the provided span to point to the memory at the given offset in
++ * the buffer, with the length of the span being capped by the number of bytes
++ * used in the buffer after the offset (i.e. bytes remaining after the
++ * offset).
++ *
++ * Warning: This function does not validate that @offset is less than or equal
++ * to the number of bytes used in the buffer or the buffer capacity. This must
++ * be guaranteed by the caller.
++ */
++static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
++				      struct ssam_span *span)
++{
++	span->ptr = buf->ptr + offset;
++	span->len = buf->len - offset;
++}
++
++
++bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem);
++
++int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
++		     struct ssh_frame **frame, struct ssam_span *payload,
++		     size_t maxlen);
++
++int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
++		       struct ssh_command **command,
++		       struct ssam_span *command_data);
++
++#endif /* _SURFACE_AGGREGATOR_SSH_PARSER_H */
+diff --git a/drivers/misc/surface_aggregator/ssh_request_layer.c b/drivers/misc/surface_aggregator/ssh_request_layer.c
+new file mode 100644
+index 000000000000..f47bd949b6c3
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/ssh_request_layer.c
+@@ -0,0 +1,1254 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * SSH request transport layer.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/atomic.h>
++#include <linux/completion.h>
++#include <linux/error-injection.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++#include <linux/surface_aggregator/controller.h>
++
++#include "ssh_packet_layer.h"
++#include "ssh_request_layer.h"
++
++#include "trace.h"
++
++
++/*
++ * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
++ *
++ * Timeout as ktime_t delta for request responses. If we have not received a
++ * response in this time-frame after finishing the underlying packet
++ * transmission, the request will be completed with %-ETIMEDOUT as status
++ * code.
++ */
++#define SSH_RTL_REQUEST_TIMEOUT			ms_to_ktime(3000)
++
++/*
++ * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
++ *
++ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
++ * direct re-scheduling of reaper work_struct.
++ */
++#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION	ms_to_ktime(max(2000 / HZ, 50))
++
++/*
++ * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
++ *
++ * Maximum number of requests concurrently waiting to be completed (i.e.
++ * waiting for the corresponding packet transmission to finish if they don't
++ * have a response or waiting for a response if they have one).
++ */
++#define SSH_RTL_MAX_PENDING		3
++
++
++#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
++
++/**
++ * ssh_rtl_should_drop_response() - Error injection hook to drop request
++ * responses.
++ *
++ * Useful to cause request transmission timeouts in the driver by dropping the
++ * response to a request.
++ */
++static noinline bool ssh_rtl_should_drop_response(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
++
++#else
++
++static inline bool ssh_rtl_should_drop_response(void)
++{
++	return false;
++}
++
++#endif
++
++
++static u16 ssh_request_get_rqid(struct ssh_request *rqst)
++{
++	return get_unaligned_le16(rqst->packet.data.ptr
++				  + SSH_MSGOFFSET_COMMAND(rqid));
++}
++
++static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
++{
++	if (!rqst->packet.data.ptr)
++		return (u32)-1;
++
++	return ssh_request_get_rqid(rqst);
++}
++
++
++static void ssh_rtl_queue_remove(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	spin_lock(&rtl->queue.lock);
++
++	if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
++		spin_unlock(&rtl->queue.lock);
++		return;
++	}
++
++	list_del(&rqst->node);
++
++	spin_unlock(&rtl->queue.lock);
++	ssh_request_put(rqst);
++}
++
++static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
++{
++	bool empty;
++
++	spin_lock(&rtl->queue.lock);
++	empty = list_empty(&rtl->queue.head);
++	spin_unlock(&rtl->queue.lock);
++
++	return empty;
++}
++
++
++static void ssh_rtl_pending_remove(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	spin_lock(&rtl->pending.lock);
++
++	if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
++		spin_unlock(&rtl->pending.lock);
++		return;
++	}
++
++	atomic_dec(&rtl->pending.count);
++	list_del(&rqst->node);
++
++	spin_unlock(&rtl->pending.lock);
++
++	ssh_request_put(rqst);
++}
++
++static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	spin_lock(&rtl->pending.lock);
++
++	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
++		spin_unlock(&rtl->pending.lock);
++		return -EINVAL;
++	}
++
++	if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
++		spin_unlock(&rtl->pending.lock);
++		return -EALREADY;
++	}
++
++	atomic_inc(&rtl->pending.count);
++	list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
++
++	spin_unlock(&rtl->pending.lock);
++	return 0;
++}
++
++
++static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	trace_ssam_request_complete(rqst, status);
++
++	// rtl/ptl may not be set if we're cancelling before submitting
++	rtl_dbg_cond(rtl, "rtl: completing request (rqid: 0x%04x, status: %d)\n",
++		     ssh_request_get_rqid_safe(rqst), status);
++
++	rqst->ops->complete(rqst, NULL, NULL, status);
++}
++
++static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
++				      const struct ssh_command *cmd,
++				      const struct ssam_span *data)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	trace_ssam_request_complete(rqst, 0);
++
++	rtl_dbg(rtl, "rtl: completing request with response (rqid: 0x%04x)\n",
++		ssh_request_get_rqid(rqst));
++
++	rqst->ops->complete(rqst, cmd, data, 0);
++}
++
++
++static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
++		return !atomic_read(&rtl->pending.count);
++
++	return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
++}
++
++static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
++{
++	struct ssh_request *rqst = ERR_PTR(-ENOENT);
++	struct ssh_request *p, *n;
++
++	spin_lock(&rtl->queue.lock);
++
++	// find first non-locked request and remove it
++	list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
++		if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
++			continue;
++
++		if (!ssh_rtl_tx_can_process(p)) {
++			rqst = ERR_PTR(-EBUSY);
++			break;
++		}
++
++		// remove from queue and mark as transmitting
++		set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
++		// ensure state never gets zero
++		smp_mb__before_atomic();
++		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
++
++		list_del(&p->node);
++
++		rqst = p;
++		break;
++	}
++
++	spin_unlock(&rtl->queue.lock);
++	return rqst;
++}
++
++static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
++{
++	struct ssh_request *rqst;
++	int status;
++
++	// get and prepare next request for transmit
++	rqst = ssh_rtl_tx_next(rtl);
++	if (IS_ERR(rqst))
++		return PTR_ERR(rqst);
++
++	// add to/mark as pending
++	status = ssh_rtl_tx_pending_push(rqst);
++	if (status) {
++		ssh_request_put(rqst);
++		return -EAGAIN;
++	}
++
++	// submit packet
++	status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
++	if (status == -ESHUTDOWN) {
++		/*
++		 * Packet has been refused due to the packet layer shutting
++		 * down. Complete it here.
++		 */
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
++		/*
++		 * Note: A barrier is not required here, as there are only two
++		 * references in the system at this point: The one that we have,
++		 * and the other one that belongs to the pending set. Due to the
++		 * request being marked as "transmitting", our process is the
++		 * only one allowed to remove the pending node and change the
++		 * state. Normally, the task would fall to the packet callback,
++		 * but as this is a path where submission failed, this callback
++		 * will never be executed.
++		 */
++
++		ssh_rtl_pending_remove(rqst);
++		ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
++
++		ssh_request_put(rqst);
++		return -ESHUTDOWN;
++
++	} else if (status) {
++		/*
++		 * If submitting the packet failed and the packet layer isn't
++	 * shutting down, the packet has either been submitted/queued
++		 * before (-EALREADY, which cannot happen as we have guaranteed
++		 * that requests cannot be re-submitted), or the packet was
++		 * marked as locked (-EINVAL). To mark the packet locked at this
++		 * stage, the request, and thus the packets itself, had to have
++		 * been canceled. Simply drop the reference. Cancellation itself
++		 * will remove it from the set of pending requests.
++		 */
++
++		WARN_ON(status != -EINVAL);
++
++		ssh_request_put(rqst);
++		return -EAGAIN;
++	}
++
++	ssh_request_put(rqst);
++	return 0;
++}
++
++static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
++{
++	if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
++		return false;
++
++	if (ssh_rtl_queue_empty(rtl))
++		return false;
++
++	return schedule_work(&rtl->tx.work);
++}
++
++static void ssh_rtl_tx_work_fn(struct work_struct *work)
++{
++	struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
++	int i, status;
++
++	/*
++	 * Try to be nice and not block/live-lock the workqueue: Run a maximum
++	 * of 10 tries, then re-submit if necessary. This should not be
++	 * necessary for normal execution, but guarantee it anyway.
++	 */
++	for (i = 0; i < 10; i++) {
++		status = ssh_rtl_tx_try_process_one(rtl);
++		if (status == -ENOENT || status == -EBUSY)
++			return;		// no more requests to process
++
++		if (status == -ESHUTDOWN) {
++			/*
++			 * Packet system shutting down. No new packets can be
++			 * transmitted. Return silently, the party initiating
++			 * the shutdown should handle the rest.
++			 */
++			return;
++		}
++
++		WARN_ON(status != 0 && status != -EAGAIN);
++	}
++
++	// out of tries, reschedule
++	ssh_rtl_tx_schedule(rtl);
++}
++
++
++/**
++ * ssh_rtl_submit() - Submit a request to the transport layer.
++ * @rtl:  The request transport layer.
++ * @rqst: The request to submit.
++ *
++ * Submits a request to the transport layer. A single request may not be
++ * submitted multiple times without reinitializing it.
++ *
++ * Return: Returns zero on success, %-EINVAL if the request type is invalid or
++ * the request has been canceled prior to submission, %-EALREADY if the
++ * request has already been submitted, or %-ESHUTDOWN in case the request
++ * transport layer has been shut down.
++ */
++int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
++{
++	trace_ssam_request_submit(rqst);
++
++	/*
++	 * Ensure that requests expecting a response are sequenced. If this
++	 * invariant ever changes, see the comment in ssh_rtl_complete() on what
++	 * is required to be changed in the code.
++	 */
++	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
++		if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
++			return -EINVAL;
++
++	spin_lock(&rtl->queue.lock);
++
++	/*
++	 * Try to set ptl and check if this request has already been submitted.
++	 *
++	 * Must be inside lock as we might run into a lost update problem
++	 * otherwise: If this were outside of the lock, cancellation in
++	 * ssh_rtl_cancel_nonpending() may run after we've set the ptl
++	 * reference but before we enter the lock. In that case, we'd detect
++	 * that the request is being added to the queue and would try to remove
++	 * it from that, but removal might fail because it hasn't actually been
++	 * added yet. By putting this cmpxchg in the critical section, we
++	 * ensure that the queuing detection only triggers when we are already
++	 * in the critical section and the remove process will wait until the
++	 * push operation has been completed (via lock) due to that. Only then,
++	 * we can safely try to remove it.
++	 */
++	if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl) != NULL) {
++		spin_unlock(&rtl->queue.lock);
++		return -EALREADY;
++	}
++
++	/*
++	 * Ensure that we set ptl reference before we continue modifying state.
++	 * This is required for non-pending cancellation. This barrier is paired
++	 * with the one in ssh_rtl_cancel_nonpending().
++	 *
++	 * By setting the ptl reference before we test for "locked", we can
++	 * check if the "locked" test may have already run. See comments in
++	 * ssh_rtl_cancel_nonpending() for more detail.
++	 */
++	smp_mb__after_atomic();
++
++	if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
++		spin_unlock(&rtl->queue.lock);
++		return -ESHUTDOWN;
++	}
++
++	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
++		spin_unlock(&rtl->queue.lock);
++		return -EINVAL;
++	}
++
++	set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
++	list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
++
++	spin_unlock(&rtl->queue.lock);
++
++	ssh_rtl_tx_schedule(rtl);
++	return 0;
++}
++
++static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
++				       ktime_t expires)
++{
++	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
++	ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
++	ktime_t old_exp, old_act;
++
++	// re-adjust / schedule reaper if it is above resolution delta
++	old_act = READ_ONCE(rtl->rtx_timeout.expires);
++	if (ktime_after(aexp, old_act))
++		return;
++
++	do {
++		old_exp = old_act;
++		old_act = cmpxchg64(&rtl->rtx_timeout.expires, old_exp, expires);
++	} while (old_exp != old_act && ktime_before(aexp, old_act));
++
++	// if we updated the reaper expiration, modify work timeout
++	if (old_exp == old_act && old_act != expires)
++		mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
++}
++
++static void ssh_rtl_timeout_start(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++	ktime_t timestamp = ktime_get_coarse_boottime();
++	ktime_t timeout = rtl->rtx_timeout.timeout;
++
++	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
++		return;
++
++	WRITE_ONCE(rqst->timestamp, timestamp);
++	/*
++	 * Ensure timestamp is set before starting the reaper. Paired with
++	 * implicit barrier following check on ssh_request_get_expiration() in
++	 * ssh_rtl_timeout_reap.
++	 */
++	smp_mb__after_atomic();
++
++	ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
++}
++
++
++static void ssh_rtl_complete(struct ssh_rtl *rtl,
++			     const struct ssh_command *command,
++			     const struct ssam_span *command_data)
++{
++	struct ssh_request *r = NULL;
++	struct ssh_request *p, *n;
++	u16 rqid = get_unaligned_le16(&command->rqid);
++
++	trace_ssam_rx_response_received(command, command_data->len);
++
++	/*
++	 * Get request from pending based on request ID and mark it as response
++	 * received and locked.
++	 */
++	spin_lock(&rtl->pending.lock);
++	list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
++		// we generally expect requests to be processed in order
++		if (unlikely(ssh_request_get_rqid(p) != rqid))
++			continue;
++
++		// simulate response timeout
++		if (ssh_rtl_should_drop_response()) {
++			spin_unlock(&rtl->pending.lock);
++
++			trace_ssam_ei_rx_drop_response(p);
++			rtl_info(rtl, "request error injection: dropping response for request %p\n",
++				 &p->packet);
++			return;
++		}
++
++		/*
++		 * Mark as "response received" and "locked" as we're going to
++		 * complete it.
++		 */
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
++		set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
++		// ensure state never gets zero
++		smp_mb__before_atomic();
++		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
++
++		atomic_dec(&rtl->pending.count);
++		list_del(&p->node);
++
++		r = p;
++		break;
++	}
++	spin_unlock(&rtl->pending.lock);
++
++	if (!r) {
++		rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = 0x%04x)\n",
++			 rqid);
++		return;
++	}
++
++	// if the request hasn't been completed yet, we will do this now
++	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
++		ssh_request_put(r);
++		ssh_rtl_tx_schedule(rtl);
++		return;
++	}
++
++	/*
++	 * Make sure the request has been transmitted. In case of a sequenced
++	 * request, we are guaranteed that the completion callback will run on
++	 * the receiver thread directly when the ACK for the packet has been
++	 * received. Similarly, this function is guaranteed to run on the
++	 * receiver thread. Thus we are guaranteed that if the packet has been
++	 * successfully transmitted and received an ACK, the transmitted flag
++	 * has been set and is visible here.
++	 *
++	 * We are currently not handling unsequenced packets here, as those
++	 * should never expect a response as ensured in ssh_rtl_submit. If this
++	 * ever changes, one would have to test for
++	 *
++	 *	(r->state & (transmitting | transmitted))
++	 *
++	 * on unsequenced packets to determine if they could have been
++	 * transmitted. There are no synchronization guarantees as in the
++	 * sequenced case, since, in this case, the callback function will not
++	 * run on the same thread. Thus an exact determination is impossible.
++	 */
++	if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
++		rtl_err(rtl, "rtl: received response before ACK for request (rqid = 0x%04x)\n",
++			rqid);
++
++		/*
++		 * NB: Timeout has already been canceled, request already been
++		 * removed from pending and marked as locked and completed. As
++		 * we receive a "false" response, the packet might still be
++		 * queued though.
++		 */
++		ssh_rtl_queue_remove(r);
++
++		ssh_rtl_complete_with_status(r, -EREMOTEIO);
++		ssh_request_put(r);
++
++		ssh_rtl_tx_schedule(rtl);
++		return;
++	}
++
++	/*
++	 * NB: Timeout has already been canceled, request already been
++	 * removed from pending and marked as locked and completed. The request
++	 * can also not be queued any more, as it has been marked as
++	 * transmitting and later transmitted. Thus no need to remove it from
++	 * anywhere.
++	 */
++
++	ssh_rtl_complete_with_rsp(r, command, command_data);
++	ssh_request_put(r);
++
++	ssh_rtl_tx_schedule(rtl);
++}
++
++
++static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
++{
++	struct ssh_rtl *rtl;
++	unsigned long flags, fixed;
++	bool remove;
++
++	/*
++	 * Handle unsubmitted request: Try to mark the packet as locked,
++	 * expecting the state to be zero (i.e. unsubmitted). Note that, if
++	 * setting the state worked, we might still be adding the packet to the
++	 * queue in a currently executing submit call. In that case, however,
++	 * ptl reference must have been set previously, as locked is checked
++	 * after setting ptl. Furthermore, when the ptl reference is set, the
++	 * submission process is guaranteed to have entered the critical
++	 * section. Thus only if we successfully locked this request and ptl is
++	 * NULL, we have successfully removed the request, i.e. we are
++	 * guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
++	 * packet will never be added. Otherwise, we need to try and grab it
++	 * from the queue, where we are now guaranteed that the packet is or has
++	 * been due to the critical section.
++	 *
++	 * Note that if the CMPXCHG fails, we are guaranteed that ptl has
++	 * been set and is non-NULL, as states can only be nonzero after this
++	 * has been set. Also note that we need to fetch the static (type) flags
++	 * to ensure that they don't cause the cmpxchg to fail.
++	 */
++	fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
++	flags = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
++
++	/*
++	 * Force correct ordering with regards to state and ptl reference access
++	 * to safe-guard cancellation to concurrent submission against a
++	 * lost-update problem. First try to exchange state, then also check
++	 * ptl if that worked. This barrier is paired with the
++	 * one in ssh_rtl_submit().
++	 */
++	smp_mb__after_atomic();
++
++	if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
++		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			return true;
++
++		ssh_rtl_complete_with_status(r, -ECANCELED);
++		return true;
++	}
++
++	rtl = ssh_request_rtl(r);
++	spin_lock(&rtl->queue.lock);
++
++	/*
++	 * Note: 1) Requests cannot be re-submitted. 2) If a request is queued,
++	 * it cannot be "transmitting"/"pending" yet. Thus, if we successfully
++	 * remove the request here, we have removed all its occurrences in the
++	 * system.
++	 */
++
++	remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
++	if (!remove) {
++		spin_unlock(&rtl->queue.lock);
++		return false;
++	}
++
++	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++	list_del(&r->node);
++
++	spin_unlock(&rtl->queue.lock);
++
++	ssh_request_put(r);	// drop reference obtained from queue
++
++	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++		return true;
++
++	ssh_rtl_complete_with_status(r, -ECANCELED);
++	return true;
++}
++
++static bool ssh_rtl_cancel_pending(struct ssh_request *r)
++{
++	// if the packet is already locked, it's going to be removed shortly
++	if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
++		return true;
++
++	/*
++	 * Now that we have locked the packet, we have guaranteed that it can't
++	 * be added to the system any more. If ptl is zero, the locked
++	 * check in ssh_rtl_submit() has not been run and any submission,
++	 * currently in progress or called later, won't add the packet. Thus we
++	 * can directly complete it.
++	 *
++	 * The implicit memory barrier of test_and_set_bit() should be enough
++	 * to ensure that the correct order (first lock, then check ptl) is
++	 * ensured. This is paired with the barrier in ssh_rtl_submit().
++	 */
++	if (!READ_ONCE(r->packet.ptl)) {
++		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			return true;
++
++		ssh_rtl_complete_with_status(r, -ECANCELED);
++		return true;
++	}
++
++	/*
++	 * Try to cancel the packet. If the packet has not been completed yet,
++	 * this will subsequently (and synchronously) call the completion
++	 * callback of the packet, which will complete the request.
++	 */
++	ssh_ptl_cancel(&r->packet);
++
++	/*
++	 * If the packet has been completed with success, i.e. has not been
++	 * canceled by the above call, the request may not have been completed
++	 * yet (may be waiting for a response). Check if we need to do this
++	 * here.
++	 */
++	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++		return true;
++
++	ssh_rtl_queue_remove(r);
++	ssh_rtl_pending_remove(r);
++	ssh_rtl_complete_with_status(r, -ECANCELED);
++
++	return true;
++}
++
++/**
++ * ssh_rtl_cancel() - Cancel request.
++ * @rqst:    The request to cancel.
++ * @pending: Whether to also cancel pending requests.
++ *
++ * Cancels the given request. If @pending is %false, this will not cancel
++ * pending requests, i.e. requests that have already been submitted to the
++ * packet layer but not been completed yet. If @pending is %true, this will
++ * cancel the given request regardless of the state it is in.
++ *
++ * If the request has been canceled by calling this function, both completion
++ * and release callbacks of the request will be executed in a reasonable
++ * time-frame. This may happen during execution of this function, however,
++ * there is no guarantee for this. For example, a request currently
++ * transmitting will be canceled/completed only after transmission has
++ * completed, and the respective callbacks will be executed on the transmitter
++ * thread, which may happen during, but also some time after execution of the
++ * cancel function.
++ *
++ * Return: Returns %true iff the given request has been canceled or completed,
++ * either by this function or prior to calling this function, %false
++ * otherwise. If @pending is %true, this function will always return %true.
++ */
++bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
++{
++	struct ssh_rtl *rtl;
++	bool canceled;
++
++	if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
++		return true;
++
++	trace_ssam_request_cancel(rqst);
++
++	if (pending)
++		canceled = ssh_rtl_cancel_pending(rqst);
++	else
++		canceled = ssh_rtl_cancel_nonpending(rqst);
++
++	// note: rtl may be NULL if request has not been submitted yet
++	rtl = ssh_request_rtl(rqst);
++	if (canceled && rtl)
++		ssh_rtl_tx_schedule(rtl);
++
++	return canceled;
++}
++
++
++static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
++{
++	struct ssh_request *r = to_ssh_request(p);
++
++	if (unlikely(status)) {
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++
++		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			return;
++
++		/*
++		 * The packet may get canceled even though it has not been
++		 * submitted yet. The request may still be queued. Check the
++		 * queue and remove it if necessary. As the timeout would have
++		 * been started in this function on success, there's no need to
++		 * cancel it here.
++		 */
++		ssh_rtl_queue_remove(r);
++		ssh_rtl_pending_remove(r);
++		ssh_rtl_complete_with_status(r, status);
++
++		ssh_rtl_tx_schedule(ssh_request_rtl(r));
++		return;
++	}
++
++	// update state: mark as transmitted and clear transmitting
++	set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
++	// ensure state never gets zero
++	smp_mb__before_atomic();
++	clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
++
++	// if we expect a response, we just need to start the timeout
++	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
++		ssh_rtl_timeout_start(r);
++		return;
++	}
++
++	/*
++	 * If we don't expect a response, lock, remove, and complete the
++	 * request. Note that, at this point, the request is guaranteed to have
++	 * left the queue and no timeout has been started. Thus we only need to
++	 * remove it from pending. If the request has already been completed (it
++	 * may have been canceled) return.
++	 */
++
++	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++		return;
++
++	ssh_rtl_pending_remove(r);
++	ssh_rtl_complete_with_status(r, 0);
++
++	ssh_rtl_tx_schedule(ssh_request_rtl(r));
++}
++
++
++static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
++{
++	ktime_t timestamp = READ_ONCE(r->timestamp);
++
++	if (timestamp != KTIME_MAX)
++		return ktime_add(timestamp, timeout);
++	else
++		return KTIME_MAX;
++}
++
++static void ssh_rtl_timeout_reap(struct work_struct *work)
++{
++	struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
++	struct ssh_request *r, *n;
++	LIST_HEAD(claimed);
++	ktime_t now = ktime_get_coarse_boottime();
++	ktime_t timeout = rtl->rtx_timeout.timeout;
++	ktime_t next = KTIME_MAX;
++
++	trace_ssam_rtl_timeout_reap("pending", atomic_read(&rtl->pending.count));
++
++	/*
++	 * Mark reaper as "not pending". This is done before checking any
++	 * requests to avoid lost-update type problems.
++	 */
++	WRITE_ONCE(rtl->rtx_timeout.expires, KTIME_MAX);
++	/*
++	 * Ensure that the reaper is marked as deactivated before we continue
++	 * checking requests to prevent lost-update problems when a request is
++	 * added to the pending set and ssh_rtl_timeout_reaper_mod is called
++	 * during execution of the part below.
++	 */
++	smp_mb__after_atomic();
++
++	spin_lock(&rtl->pending.lock);
++	list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
++		ktime_t expires = ssh_request_get_expiration(r, timeout);
++
++		/*
++		 * Check if the timeout hasn't expired yet. Find out next
++		 * expiration date to be handled after this run.
++		 */
++		if (ktime_after(expires, now)) {
++			next = ktime_before(expires, next) ? expires : next;
++			continue;
++		}
++
++		// avoid further transitions if locked
++		if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
++			continue;
++
++		/*
++		 * We have now marked the packet as locked. Thus it cannot be
++		 * added to the pending or queued lists again after we've
++		 * removed it here. We can therefore re-use the node of this
++		 * packet temporarily.
++		 */
++
++		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
++
++		atomic_dec(&rtl->pending.count);
++		list_del(&r->node);
++
++		list_add_tail(&r->node, &claimed);
++	}
++	spin_unlock(&rtl->pending.lock);
++
++	// cancel and complete the request
++	list_for_each_entry_safe(r, n, &claimed, node) {
++		trace_ssam_request_timeout(r);
++
++		/*
++		 * At this point we've removed the packet from pending. This
++		 * means that we've obtained the last (only) reference of the
++		 * system to it. Thus we can just complete it.
++		 */
++		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			ssh_rtl_complete_with_status(r, -ETIMEDOUT);
++
++		// drop the reference we've obtained by removing it from pending
++		list_del(&r->node);
++		ssh_request_put(r);
++	}
++
++	// ensure that reaper doesn't run again immediately
++	next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
++	if (next != KTIME_MAX)
++		ssh_rtl_timeout_reaper_mod(rtl, now, next);
++
++	ssh_rtl_tx_schedule(rtl);
++}
++
++
++static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
++			     const struct ssam_span *data)
++{
++	trace_ssam_rx_event_received(cmd, data->len);
++
++	rtl_dbg(rtl, "rtl: handling event (rqid: 0x%04x)\n",
++		get_unaligned_le16(&cmd->rqid));
++
++	rtl->ops.handle_event(rtl, cmd, data);
++}
++
++static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
++{
++	struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
++	struct device *dev = &p->serdev->dev;
++	struct ssh_command *command;
++	struct ssam_span command_data;
++
++	if (sshp_parse_command(dev, data, &command, &command_data))
++		return;
++
++	if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
++		ssh_rtl_rx_event(rtl, command, &command_data);
++	else
++		ssh_rtl_complete(rtl, command, &command_data);
++}
++
++static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
++{
++	if (!data->len) {
++		ptl_err(p, "rtl: rx: no data frame payload\n");
++		return;
++	}
++
++	switch (data->ptr[0]) {
++	case SSH_PLD_TYPE_CMD:
++		ssh_rtl_rx_command(p, data);
++		break;
++
++	default:
++		ptl_err(p, "rtl: rx: unknown frame payload type (type: 0x%02x)\n",
++			data->ptr[0]);
++		break;
++	}
++}
++
++
++static void ssh_rtl_packet_release(struct ssh_packet *p)
++{
++	struct ssh_request *rqst;
++
++	rqst = to_ssh_request(p);
++	rqst->ops->release(rqst);
++}
++
++static const struct ssh_packet_ops ssh_rtl_packet_ops = {
++	.complete = ssh_rtl_packet_callback,
++	.release = ssh_rtl_packet_release,
++};
++
++/**
++ * ssh_request_init() - Initialize SSH request.
++ * @rqst:  The request to initialize.
++ * @flags: Request flags, determining the type of the request.
++ * @ops:   Request operations.
++ *
++ * Initializes the given SSH request and underlying packet. Sets the message
++ * buffer pointer to %NULL and the message buffer length to zero. This buffer
++ * has to be set separately via ssh_request_set_data() before submission and
++ * must contain a valid SSH request message.
++ */
++void ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
++		      const struct ssh_request_ops *ops)
++{
++	unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
++
++	if (!(flags & SSAM_REQUEST_UNSEQUENCED))
++		type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
++
++	ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
++			&ssh_rtl_packet_ops);
++
++	INIT_LIST_HEAD(&rqst->node);
++
++	rqst->state = 0;
++	if (flags & SSAM_REQUEST_HAS_RESPONSE)
++		rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
++
++	rqst->timestamp = KTIME_MAX;
++	rqst->ops = ops;
++}
++
++
++/**
++ * ssh_rtl_init() - Initialize request transport layer.
++ * @rtl:    The request transport layer to initialize.
++ * @serdev: The underlying serial device, i.e. the lower-level transport.
++ * @ops:    Request transport layer operations.
++ *
++ * Initializes the given request transport layer and associated packet
++ * transport layer. Transmitter and receiver threads must be started
++ * separately via ssh_rtl_tx_start() and ssh_rtl_rx_start(), after the
++ * request-layer has been initialized and the lower-level serial device layer
++ * has been set up.
++ *
++ * Return: Returns zero on success and a nonzero error code on failure.
++ */
++int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
++		 const struct ssh_rtl_ops *ops)
++{
++	struct ssh_ptl_ops ptl_ops;
++	int status;
++
++	ptl_ops.data_received = ssh_rtl_rx_data;
++
++	status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
++	if (status)
++		return status;
++
++	spin_lock_init(&rtl->queue.lock);
++	INIT_LIST_HEAD(&rtl->queue.head);
++
++	spin_lock_init(&rtl->pending.lock);
++	INIT_LIST_HEAD(&rtl->pending.head);
++	atomic_set_release(&rtl->pending.count, 0);
++
++	INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
++
++	rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
++	rtl->rtx_timeout.expires = KTIME_MAX;
++	INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
++
++	rtl->ops = *ops;
++
++	return 0;
++}
++
++/**
++ * ssh_rtl_destroy() - Deinitialize request transport layer.
++ * @rtl: The request transport layer to deinitialize.
++ *
++ * Deinitializes the given request transport layer and frees resources
++ * associated with it. If receiver and/or transmitter threads have been
++ * started, the layer must first be shut down via ssh_rtl_shutdown() before
++ * this function can be called.
++ */
++void ssh_rtl_destroy(struct ssh_rtl *rtl)
++{
++	ssh_ptl_destroy(&rtl->ptl);
++}
++
++/**
++ * ssh_rtl_tx_start() - Start request transmitter and receiver.
++ * @rtl: The request transport layer.
++ *
++ * Return: Returns zero on success, a negative error code on failure.
++ */
++int ssh_rtl_start(struct ssh_rtl *rtl)
++{
++	int status;
++
++	status = ssh_ptl_tx_start(&rtl->ptl);
++	if (status)
++		return status;
++
++	ssh_rtl_tx_schedule(rtl);
++
++	status = ssh_ptl_rx_start(&rtl->ptl);
++	if (status) {
++		ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
++		ssh_ptl_tx_stop(&rtl->ptl);
++		return status;
++	}
++
++	return 0;
++}
++
++struct ssh_flush_request {
++	struct ssh_request base;
++	struct completion completion;
++	int status;
++};
++
++static void ssh_rtl_flush_request_complete(struct ssh_request *r,
++					   const struct ssh_command *cmd,
++					   const struct ssam_span *data,
++					   int status)
++{
++	struct ssh_flush_request *rqst;
++
++	rqst = container_of(r, struct ssh_flush_request, base);
++	rqst->status = status;
++}
++
++static void ssh_rtl_flush_request_release(struct ssh_request *r)
++{
++	struct ssh_flush_request *rqst;
++
++	rqst = container_of(r, struct ssh_flush_request, base);
++	complete_all(&rqst->completion);
++}
++
++static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
++	.complete = ssh_rtl_flush_request_complete,
++	.release = ssh_rtl_flush_request_release,
++};
++
++/**
++ * ssh_rtl_flush() - Flush the request transport layer.
++ * @rtl:     request transport layer
++ * @timeout: timeout for the flush operation in jiffies
++ *
++ * Queue a special flush request and wait for its completion. This request
++ * will be completed after all other currently queued and pending requests
++ * have been completed. Instead of a normal data packet, this request submits
++ * a special flush packet, meaning that upon completion, also the underlying
++ * packet transport layer has been flushed.
++ *
++ * Flushing the request layer guarantees that all previously submitted
++ * requests have been fully completed before this call returns. Additionally,
++ * flushing blocks execution of all later submitted requests until the flush
++ * has been completed.
++ *
++ * If the caller ensures that no new requests are submitted after a call to
++ * this function, the request transport layer is guaranteed to have no
++ * remaining requests when this call returns. The same guarantee does not hold
++ * for the packet layer, on which control packets may still be queued after
++ * this call.
++ *
++ * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
++ * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
++ * and/or request transport layer has been shut down before this call. May
++ * also return %-EINTR if the underlying packet transmission has been
++ * interrupted.
++ */
++int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
++{
++	const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
++	struct ssh_flush_request rqst;
++	int status;
++
++	ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
++	rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
++	rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
++	rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
++
++	init_completion(&rqst.completion);
++
++	status = ssh_rtl_submit(rtl, &rqst.base);
++	if (status)
++		return status;
++
++	ssh_request_put(&rqst.base);
++
++	if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
++		ssh_rtl_cancel(&rqst.base, true);
++		wait_for_completion(&rqst.completion);
++	}
++
++	WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED
++		&& rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
++
++	return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
++}
++
++
++/**
++ * ssh_rtl_shutdown() - Shut down request transport layer.
++ * @rtl: The request transport layer.
++ *
++ * Shuts down the request transport layer, removing and canceling all queued
++ * and pending requests. Requests canceled by this operation will be completed
++ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
++ * stopped, the lower-level packet layer will be shutdown.
++ *
++ * As a result of this function, the transport layer will be marked as shut
++ * down. Submission of requests after the transport layer has been shut down
++ * will fail with %-ESHUTDOWN.
++ */
++void ssh_rtl_shutdown(struct ssh_rtl *rtl)
++{
++	struct ssh_request *r, *n;
++	LIST_HEAD(claimed);
++	int pending;
++
++	set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
++	/*
++	 * Ensure that the layer gets marked as shut-down before actually
++	 * stopping it. In combination with the check in ssh_rtl_submit(), this
++	 * guarantees that no new requests can be added and all already queued
++	 * requests are properly canceled.
++	 */
++	smp_mb__after_atomic();
++
++	// remove requests from queue
++	spin_lock(&rtl->queue.lock);
++	list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++		// ensure state never gets zero
++		smp_mb__before_atomic();
++		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
++
++		list_del(&r->node);
++		list_add_tail(&r->node, &claimed);
++	}
++	spin_unlock(&rtl->queue.lock);
++
++	/*
++	 * We have now guaranteed that the queue is empty and no more new
++	 * requests can be submitted (i.e. it will stay empty). This means that
++	 * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
++	 * we can simply call cancel_work_sync() on tx.work here and when that
++	 * returns, we've locked it down. This also means that after this call,
++	 * we don't submit any more packets to the underlying packet layer, so
++	 * we can also shut that down.
++	 */
++
++	cancel_work_sync(&rtl->tx.work);
++	ssh_ptl_shutdown(&rtl->ptl);
++	cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
++
++	/*
++	 * Shutting down the packet layer should also have canceled all requests.
++	 * Thus the pending set should be empty. Attempt to handle this
++	 * gracefully anyways, even though this should be dead code.
++	 */
++
++	pending = atomic_read(&rtl->pending.count);
++	if (WARN_ON(pending)) {
++		spin_lock(&rtl->pending.lock);
++		list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
++			set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++			// ensure state never gets zero
++			smp_mb__before_atomic();
++			clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
++
++			list_del(&r->node);
++			list_add_tail(&r->node, &claimed);
++		}
++		spin_unlock(&rtl->pending.lock);
++	}
++
++	// finally, cancel and complete the requests we claimed before
++	list_for_each_entry_safe(r, n, &claimed, node) {
++		// test_and_set because we still might compete with cancellation
++		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++			ssh_rtl_complete_with_status(r, -ESHUTDOWN);
++
++		// drop reference we've obtained by removing it from the lists
++		list_del(&r->node);
++		ssh_request_put(r);
++	}
++}
+diff --git a/drivers/misc/surface_aggregator/ssh_request_layer.h b/drivers/misc/surface_aggregator/ssh_request_layer.h
+new file mode 100644
+index 000000000000..e945e0532628
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/ssh_request_layer.h
+@@ -0,0 +1,142 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * SSH request transport layer.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
++#define _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
++
++#include <linux/atomic.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++#include <linux/surface_aggregator/controller.h>
++
++#include "ssh_packet_layer.h"
++
++
++/**
++ * enum ssh_rtl_state_flags - State-flags for &struct ssh_rtl.
++ *
++ * @SSH_RTL_SF_SHUTDOWN_BIT:
++ *	Indicates that the request transport layer has been shut down or is
++ *	being shut down and should not accept any new requests.
++ */
++enum ssh_rtl_state_flags {
++	SSH_RTL_SF_SHUTDOWN_BIT,
++};
++
++/**
++ * struct ssh_rtl_ops - Callback operations for request transport layer.
++ * @handle_event: Function called when a SSH event has been received. The
++ *                specified function takes the request layer, received command
++ *                struct, and corresponding payload as arguments. If the event
++ *                has no payload, the payload span is empty (not %NULL).
++ */
++struct ssh_rtl_ops {
++	void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd,
++			     const struct ssam_span *data);
++};
++
++/**
++ * struct ssh_rtl - SSH request transport layer.
++ * @ptl:           Underlying packet transport layer.
++ * @state:         State(-flags) of the transport layer.
++ * @queue:         Request submission queue.
++ * @queue.lock:    Lock for modifying the request submission queue.
++ * @queue.head:    List-head of the request submission queue.
++ * @pending:       Set/list of pending requests.
++ * @pending.lock:  Lock for modifying the request set.
++ * @pending.head:  List-head of the pending set/list.
++ * @pending.count: Number of currently pending requests.
++ * @tx:            Transmitter subsystem.
++ * @tx.work:       Transmitter work item.
++ * @rtx_timeout:   Retransmission timeout subsystem.
++ * @rtx_timeout.timeout: Timeout interval for retransmission.
++ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
++ * @rtx_timeout.reaper:  Work performing timeout checks and subsequent actions.
++ * @ops:           Request layer operations.
++ */
++struct ssh_rtl {
++	struct ssh_ptl ptl;
++	unsigned long state;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++	} queue;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++		atomic_t count;
++	} pending;
++
++	struct {
++		struct work_struct work;
++	} tx;
++
++	struct {
++		ktime_t timeout;
++		ktime_t expires;
++		struct delayed_work reaper;
++	} rtx_timeout;
++
++	struct ssh_rtl_ops ops;
++};
++
++#define rtl_dbg(r, fmt, ...)  ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__)
++#define rtl_info(p, fmt, ...) ptl_info(&(p)->ptl, fmt, ##__VA_ARGS__)
++#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__)
++#define rtl_err(r, fmt, ...)  ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__)
++#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__)
++
++#define to_ssh_rtl(ptr, member) \
++	container_of(ptr, struct ssh_rtl, member)
++
++/**
++ * ssh_rtl_get_device() - Get device associated with request transport layer.
++ * @rtl: The request transport layer.
++ *
++ * Return: Returns the device on which the given request transport layer
++ * builds upon.
++ */
++static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl)
++{
++	return ssh_ptl_get_device(&rtl->ptl);
++}
++
++/**
++ * ssh_request_rtl() - Get request transport layer associated with request.
++ * @rqst: The request to get the request transport layer reference for.
++ *
++ * Return: Returns the &struct ssh_rtl associated with the given SSH request.
++ */
++static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst)
++{
++	struct ssh_ptl *ptl;
++
++	ptl = READ_ONCE(rqst->packet.ptl);
++	return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL;
++}
++
++int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst);
++bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending);
++
++int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
++		 const struct ssh_rtl_ops *ops);
++
++int ssh_rtl_start(struct ssh_rtl *rtl);
++int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout);
++void ssh_rtl_shutdown(struct ssh_rtl *rtl);
++void ssh_rtl_destroy(struct ssh_rtl *rtl);
++
++void ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
++		      const struct ssh_request_ops *ops);
++
++#endif /* _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H */
+diff --git a/drivers/misc/surface_aggregator/trace.h b/drivers/misc/surface_aggregator/trace.h
+new file mode 100644
+index 000000000000..232bf1142aae
+--- /dev/null
++++ b/drivers/misc/surface_aggregator/trace.h
+@@ -0,0 +1,625 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Trace points for SSAM/SSH.
++ *
++ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM surface_aggregator
++
++#if !defined(_SURFACE_AGGREGATOR_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _SURFACE_AGGREGATOR_TRACE_H
++
++#include <linux/surface_aggregator/serial_hub.h>
++
++#include <asm/unaligned.h>
++#include <linux/tracepoint.h>
++
++
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
++TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
++
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT);
++
++TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
++
++TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK);
++TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK);
++
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
++TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
++
++
++#define SSAM_PTR_UID_LEN		9
++#define SSAM_U8_FIELD_NOT_APPLICABLE	((u16)-1)
++#define SSAM_SEQ_NOT_APPLICABLE		((u16)-1)
++#define SSAM_RQID_NOT_APPLICABLE	((u32)-1)
++#define SSAM_SSH_TC_NOT_APPLICABLE	0
++
++
++#ifndef _SURFACE_AGGREGATOR_TRACE_HELPERS
++#define _SURFACE_AGGREGATOR_TRACE_HELPERS
++
++/**
++ * ssam_trace_ptr_uid() - Convert the pointer to a non-pointer UID string.
++ * @ptr: The pointer to convert.
++ * @uid_str: A buffer of length SSAM_PTR_UID_LEN where the UID will be stored.
++ *
++ * Converts the given pointer into a UID string that is safe to be shared
++ * with userspace and logs, i.e. doesn't give away the real memory location.
++ */
++static inline void ssam_trace_ptr_uid(const void *ptr, char *uid_str)
++{
++	char buf[2 * sizeof(void *) + 1];
++
++	snprintf(buf, ARRAY_SIZE(buf), "%p", ptr);
++	memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN],
++	       SSAM_PTR_UID_LEN);
++}
++
++/**
++ * ssam_trace_get_packet_seq() - Read the packet's sequence ID.
++ * @p: The packet.
++ *
++ * Return: Returns the packet's sequence ID (SEQ) field if present, or
++ * %SSAM_SEQ_NOT_APPLICABLE if not (e.g. flush packet).
++ */
++static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p)
++{
++	if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0))
++		return SSAM_SEQ_NOT_APPLICABLE;
++
++	return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
++}
++
++/**
++ * ssam_trace_get_request_id() - Read the packet's request ID.
++ * @p: The packet.
++ *
++ * Return: Returns the packet's request ID (RQID) field if the packet
++ * represents a request with command data, or %SSAM_RQID_NOT_APPLICABLE if not
++ * (e.g. flush request, control packet).
++ */
++static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
++{
++	if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
++		return SSAM_RQID_NOT_APPLICABLE;
++
++	return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]);
++}
++
++/**
++ * ssam_trace_get_request_tc() - Read the packet's request target category.
++ * @p: The packet.
++ *
++ * Return: Returns the packet's request target category (TC) field if the
++ * packet represents a request with command data, or %SSAM_SSH_TC_NOT_APPLICABLE
++ * if not (e.g. flush request, control packet).
++ */
++static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
++{
++	if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
++		return SSAM_SSH_TC_NOT_APPLICABLE;
++
++	return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
++}
++
++#endif /* _SURFACE_AGGREGATOR_TRACE_HELPERS */
++
++#define ssam_trace_get_command_field_u8(packet, field) \
++	((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
++	 ? 0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
++
++#define ssam_show_generic_u8_field(value)				\
++	__print_symbolic(value,						\
++		{ SSAM_U8_FIELD_NOT_APPLICABLE,		"N/A" }		\
++	)
++
++
++#define ssam_show_frame_type(ty)					\
++	__print_symbolic(ty,						\
++		{ SSH_FRAME_TYPE_DATA_SEQ,		"DSEQ" },	\
++		{ SSH_FRAME_TYPE_DATA_NSQ,		"DNSQ" },	\
++		{ SSH_FRAME_TYPE_ACK,			"ACK"  },	\
++		{ SSH_FRAME_TYPE_NAK,			"NAK"  }	\
++	)
++
++#define ssam_show_packet_type(type)					\
++	__print_flags(type & SSH_PACKET_FLAGS_TY_MASK, "",		\
++		{ BIT(SSH_PACKET_TY_FLUSH_BIT),		"F" },		\
++		{ BIT(SSH_PACKET_TY_SEQUENCED_BIT),	"S" },		\
++		{ BIT(SSH_PACKET_TY_BLOCKING_BIT),	"B" }		\
++	)
++
++#define ssam_show_packet_state(state)					\
++	__print_flags(state & SSH_PACKET_FLAGS_SF_MASK, "",		\
++		{ BIT(SSH_PACKET_SF_LOCKED_BIT),	"L" },		\
++		{ BIT(SSH_PACKET_SF_QUEUED_BIT),	"Q" },		\
++		{ BIT(SSH_PACKET_SF_PENDING_BIT),	"P" },		\
++		{ BIT(SSH_PACKET_SF_TRANSMITTING_BIT),	"S" },		\
++		{ BIT(SSH_PACKET_SF_TRANSMITTED_BIT),	"T" },		\
++		{ BIT(SSH_PACKET_SF_ACKED_BIT),		"A" },		\
++		{ BIT(SSH_PACKET_SF_CANCELED_BIT),	"C" },		\
++		{ BIT(SSH_PACKET_SF_COMPLETED_BIT),	"F" }		\
++	)
++
++#define ssam_show_packet_seq(seq)					\
++	__print_symbolic(seq,						\
++		{ SSAM_SEQ_NOT_APPLICABLE,		"N/A" }		\
++	)
++
++
++#define ssam_show_request_type(flags)					\
++	__print_flags(flags & SSH_REQUEST_FLAGS_TY_MASK, "",		\
++		{ BIT(SSH_REQUEST_TY_FLUSH_BIT),	"F" },		\
++		{ BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),	"R" }		\
++	)
++
++#define ssam_show_request_state(flags)					\
++	__print_flags(flags & SSH_REQUEST_FLAGS_SF_MASK, "",		\
++		{ BIT(SSH_REQUEST_SF_LOCKED_BIT),	"L" },		\
++		{ BIT(SSH_REQUEST_SF_QUEUED_BIT),	"Q" },		\
++		{ BIT(SSH_REQUEST_SF_PENDING_BIT),	"P" },		\
++		{ BIT(SSH_REQUEST_SF_TRANSMITTING_BIT),	"S" },		\
++		{ BIT(SSH_REQUEST_SF_TRANSMITTED_BIT),	"T" },		\
++		{ BIT(SSH_REQUEST_SF_RSPRCVD_BIT),	"A" },		\
++		{ BIT(SSH_REQUEST_SF_CANCELED_BIT),	"C" },		\
++		{ BIT(SSH_REQUEST_SF_COMPLETED_BIT),	"F" }		\
++	)
++
++#define ssam_show_request_id(rqid)					\
++	__print_symbolic(rqid,						\
++		{ SSAM_RQID_NOT_APPLICABLE,		"N/A" }		\
++	)
++
++#define ssam_show_ssh_tc(tc)						\
++	__print_symbolic(tc,						\
++		{ SSAM_SSH_TC_NOT_APPLICABLE,		"N/A" },	\
++		{ SSAM_SSH_TC_SAM,			"SAM" },	\
++		{ SSAM_SSH_TC_BAT,			"BAT" },	\
++		{ SSAM_SSH_TC_TMP,			"TMP" },	\
++		{ SSAM_SSH_TC_PMC,			"PMC" },	\
++		{ SSAM_SSH_TC_FAN,			"FAN" },	\
++		{ SSAM_SSH_TC_PoM,			"PoM" },	\
++		{ SSAM_SSH_TC_DBG,			"DBG" },	\
++		{ SSAM_SSH_TC_KBD,			"KBD" },	\
++		{ SSAM_SSH_TC_FWU,			"FWU" },	\
++		{ SSAM_SSH_TC_UNI,			"UNI" },	\
++		{ SSAM_SSH_TC_LPC,			"LPC" },	\
++		{ SSAM_SSH_TC_TCL,			"TCL" },	\
++		{ SSAM_SSH_TC_SFL,			"SFL" },	\
++		{ SSAM_SSH_TC_KIP,			"KIP" },	\
++		{ SSAM_SSH_TC_EXT,			"EXT" },	\
++		{ SSAM_SSH_TC_BLD,			"BLD" },	\
++		{ SSAM_SSH_TC_BAS,			"BAS" },	\
++		{ SSAM_SSH_TC_SEN,			"SEN" },	\
++		{ SSAM_SSH_TC_SRQ,			"SRQ" },	\
++		{ SSAM_SSH_TC_MCU,			"MCU" },	\
++		{ SSAM_SSH_TC_HID,			"HID" },	\
++		{ SSAM_SSH_TC_TCH,			"TCH" },	\
++		{ SSAM_SSH_TC_BKL,			"BKL" },	\
++		{ SSAM_SSH_TC_TAM,			"TAM" },	\
++		{ SSAM_SSH_TC_ACC,			"ACC" },	\
++		{ SSAM_SSH_TC_UFI,			"UFI" },	\
++		{ SSAM_SSH_TC_USC,			"USC" },	\
++		{ SSAM_SSH_TC_PEN,			"PEN" },	\
++		{ SSAM_SSH_TC_VID,			"VID" },	\
++		{ SSAM_SSH_TC_AUD,			"AUD" },	\
++		{ SSAM_SSH_TC_SMC,			"SMC" },	\
++		{ SSAM_SSH_TC_KPD,			"KPD" },	\
++		{ SSAM_SSH_TC_REG,			"REG" }		\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_frame_class,
++	TP_PROTO(const struct ssh_frame *frame),
++
++	TP_ARGS(frame),
++
++	TP_STRUCT__entry(
++		__field(u8, type)
++		__field(u8, seq)
++		__field(u16, len)
++	),
++
++	TP_fast_assign(
++		__entry->type = frame->type;
++		__entry->seq = frame->seq;
++		__entry->len = get_unaligned_le16(&frame->len);
++	),
++
++	TP_printk("ty=%s, seq=0x%02x, len=%u",
++		ssam_show_frame_type(__entry->type),
++		__entry->seq,
++		__entry->len
++	)
++);
++
++#define DEFINE_SSAM_FRAME_EVENT(name)				\
++	DEFINE_EVENT(ssam_frame_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_frame *frame),	\
++		TP_ARGS(frame)					\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_command_class,
++	TP_PROTO(const struct ssh_command *cmd, u16 len),
++
++	TP_ARGS(cmd, len),
++
++	TP_STRUCT__entry(
++		__field(u16, rqid)
++		__field(u16, len)
++		__field(u8, tc)
++		__field(u8, cid)
++		__field(u8, iid)
++	),
++
++	TP_fast_assign(
++		__entry->rqid = get_unaligned_le16(&cmd->rqid);
++		__entry->tc = cmd->tc;
++		__entry->cid = cmd->cid;
++		__entry->iid = cmd->iid;
++		__entry->len = len;
++	),
++
++	TP_printk("rqid=0x%04x, tc=%s, cid=0x%02x, iid=0x%02x, len=%u",
++		__entry->rqid,
++		ssam_show_ssh_tc(__entry->tc),
++		__entry->cid,
++		__entry->iid,
++		__entry->len
++	)
++);
++
++#define DEFINE_SSAM_COMMAND_EVENT(name)					\
++	DEFINE_EVENT(ssam_command_class, ssam_##name,			\
++		TP_PROTO(const struct ssh_command *cmd, u16 len),	\
++		TP_ARGS(cmd, len)					\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_packet_class,
++	TP_PROTO(const struct ssh_packet *packet),
++
++	TP_ARGS(packet),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, state)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++		__field(u8, priority)
++		__field(u16, length)
++		__field(u16, seq)
++	),
++
++	TP_fast_assign(
++		__entry->state = READ_ONCE(packet->state);
++		ssam_trace_ptr_uid(packet, __entry->uid);
++		__entry->priority = READ_ONCE(packet->priority);
++		__entry->length = packet->data.len;
++		__entry->seq = ssam_trace_get_packet_seq(packet);
++	),
++
++	TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s",
++		__entry->uid,
++		ssam_show_packet_seq(__entry->seq),
++		ssam_show_packet_type(__entry->state),
++		__entry->priority,
++		__entry->length,
++		ssam_show_packet_state(__entry->state)
++	)
++);
++
++#define DEFINE_SSAM_PACKET_EVENT(name)				\
++	DEFINE_EVENT(ssam_packet_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_packet *packet),	\
++		TP_ARGS(packet)					\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_packet_status_class,
++	TP_PROTO(const struct ssh_packet *packet, int status),
++
++	TP_ARGS(packet, status),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, state)
++		__field(int, status)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++		__field(u8, priority)
++		__field(u16, length)
++		__field(u16, seq)
++	),
++
++	TP_fast_assign(
++		__entry->state = READ_ONCE(packet->state);
++		__entry->status = status;
++		ssam_trace_ptr_uid(packet, __entry->uid);
++		__entry->priority = READ_ONCE(packet->priority);
++		__entry->length = packet->data.len;
++		__entry->seq = ssam_trace_get_packet_seq(packet);
++	),
++
++	TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s, status=%d",
++		__entry->uid,
++		ssam_show_packet_seq(__entry->seq),
++		ssam_show_packet_type(__entry->state),
++		__entry->priority,
++		__entry->length,
++		ssam_show_packet_state(__entry->state),
++		__entry->status
++	)
++);
++
++#define DEFINE_SSAM_PACKET_STATUS_EVENT(name)				\
++	DEFINE_EVENT(ssam_packet_status_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_packet *packet, int status),	\
++		TP_ARGS(packet, status)					\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_request_class,
++	TP_PROTO(const struct ssh_request *request),
++
++	TP_ARGS(request),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, state)
++		__field(u32, rqid)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++		__field(u8, tc)
++		__field(u16, cid)
++		__field(u16, iid)
++	),
++
++	TP_fast_assign(
++		const struct ssh_packet *p = &request->packet;
++
++		// use packet for UID so we can match requests to packets
++		__entry->state = READ_ONCE(request->state);
++		__entry->rqid = ssam_trace_get_request_id(p);
++		ssam_trace_ptr_uid(p, __entry->uid);
++		__entry->tc = ssam_trace_get_request_tc(p);
++		__entry->cid = ssam_trace_get_command_field_u8(p, cid);
++		__entry->iid = ssam_trace_get_command_field_u8(p, iid);
++	),
++
++	TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
++		__entry->uid,
++		ssam_show_request_id(__entry->rqid),
++		ssam_show_request_type(__entry->state),
++		ssam_show_request_state(__entry->state),
++		ssam_show_ssh_tc(__entry->tc),
++		ssam_show_generic_u8_field(__entry->cid),
++		ssam_show_generic_u8_field(__entry->iid)
++	)
++);
++
++#define DEFINE_SSAM_REQUEST_EVENT(name)				\
++	DEFINE_EVENT(ssam_request_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_request *request),	\
++		TP_ARGS(request)				\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_request_status_class,
++	TP_PROTO(const struct ssh_request *request, int status),
++
++	TP_ARGS(request, status),
++
++	TP_STRUCT__entry(
++		__field(unsigned long, state)
++		__field(u32, rqid)
++		__field(int, status)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++		__field(u8, tc)
++		__field(u16, cid)
++		__field(u16, iid)
++	),
++
++	TP_fast_assign(
++		const struct ssh_packet *p = &request->packet;
++
++		// use packet for UID so we can match requests to packets
++		__entry->state = READ_ONCE(request->state);
++		__entry->rqid = ssam_trace_get_request_id(p);
++		__entry->status = status;
++		ssam_trace_ptr_uid(p, __entry->uid);
++		__entry->tc = ssam_trace_get_request_tc(p);
++		__entry->cid = ssam_trace_get_command_field_u8(p, cid);
++		__entry->iid = ssam_trace_get_command_field_u8(p, iid);
++	),
++
++	TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
++		__entry->uid,
++		ssam_show_request_id(__entry->rqid),
++		ssam_show_request_type(__entry->state),
++		ssam_show_request_state(__entry->state),
++		ssam_show_ssh_tc(__entry->tc),
++		ssam_show_generic_u8_field(__entry->cid),
++		ssam_show_generic_u8_field(__entry->iid),
++		__entry->status
++	)
++);
++
++#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name)				\
++	DEFINE_EVENT(ssam_request_status_class, ssam_##name,		\
++		TP_PROTO(const struct ssh_request *request, int status),\
++		TP_ARGS(request, status)				\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_alloc_class,
++	TP_PROTO(void *ptr, size_t len),
++
++	TP_ARGS(ptr, len),
++
++	TP_STRUCT__entry(
++		__field(size_t, len)
++		__array(char, uid, SSAM_PTR_UID_LEN)
++	),
++
++	TP_fast_assign(
++		__entry->len = len;
++		ssam_trace_ptr_uid(ptr, __entry->uid);
++	),
++
++	TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len)
++);
++
++#define DEFINE_SSAM_ALLOC_EVENT(name)					\
++	DEFINE_EVENT(ssam_alloc_class, ssam_##name,			\
++		TP_PROTO(void *ptr, size_t len),			\
++		TP_ARGS(ptr, len)					\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_free_class,
++	TP_PROTO(void *ptr),
++
++	TP_ARGS(ptr),
++
++	TP_STRUCT__entry(
++		__array(char, uid, SSAM_PTR_UID_LEN)
++	),
++
++	TP_fast_assign(
++		ssam_trace_ptr_uid(ptr, __entry->uid);
++	),
++
++	TP_printk("uid=%s", __entry->uid)
++);
++
++#define DEFINE_SSAM_FREE_EVENT(name)					\
++	DEFINE_EVENT(ssam_free_class, ssam_##name,			\
++		TP_PROTO(void *ptr),					\
++		TP_ARGS(ptr)						\
++	)
++
++
++DECLARE_EVENT_CLASS(ssam_generic_uint_class,
++	TP_PROTO(const char *property, unsigned int value),
++
++	TP_ARGS(property, value),
++
++	TP_STRUCT__entry(
++		__field(unsigned int, value)
++		__string(property, property)
++	),
++
++	TP_fast_assign(
++		__entry->value = value;
++		__assign_str(property, property);
++	),
++
++	TP_printk("%s=%u", __get_str(property), __entry->value)
++);
++
++#define DEFINE_SSAM_GENERIC_UINT_EVENT(name)				\
++	DEFINE_EVENT(ssam_generic_uint_class, ssam_##name,		\
++		TP_PROTO(const char *property, unsigned int value),	\
++		TP_ARGS(property, value)				\
++	)
++
++
++DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
++DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
++DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
++
++DEFINE_SSAM_PACKET_EVENT(packet_release);
++DEFINE_SSAM_PACKET_EVENT(packet_submit);
++DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
++DEFINE_SSAM_PACKET_EVENT(packet_timeout);
++DEFINE_SSAM_PACKET_EVENT(packet_cancel);
++DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
++DEFINE_SSAM_GENERIC_UINT_EVENT(ptl_timeout_reap);
++
++DEFINE_SSAM_REQUEST_EVENT(request_submit);
++DEFINE_SSAM_REQUEST_EVENT(request_timeout);
++DEFINE_SSAM_REQUEST_EVENT(request_cancel);
++DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
++DEFINE_SSAM_GENERIC_UINT_EVENT(rtl_timeout_reap);
++
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
++DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
++DEFINE_SSAM_GENERIC_UINT_EVENT(ei_rx_corrupt_syn);
++DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
++DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
++
++DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
++DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
++
++DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
++DEFINE_SSAM_FREE_EVENT(event_item_free);
++
++#endif /* _SURFACE_AGGREGATOR_TRACE_H */
++
++/* This part must be outside protection */
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE trace
++
++#include <trace/define_trace.h>
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index ef64063fac30..0b8f1feefe0e 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -840,15 +840,16 @@ struct mhi_device_id {
+ 
+ /* Surface System Aggregator Module */
+ 
+-#define SSAM_MATCH_CHANNEL	0x1
++#define SSAM_MATCH_TARGET	0x1
+ #define SSAM_MATCH_INSTANCE	0x2
+ #define SSAM_MATCH_FUNCTION	0x4
+ 
+ struct ssam_device_id {
+ 	__u8 match_flags;
+ 
++	__u8 domain;
+ 	__u8 category;
+-	__u8 channel;
++	__u8 target;
+ 	__u8 instance;
+ 	__u8 function;
+ 
+diff --git a/include/linux/surface_acpi_notify.h b/include/linux/surface_acpi_notify.h
+new file mode 100644
+index 000000000000..8e3e86c7d78c
+--- /dev/null
++++ b/include/linux/surface_acpi_notify.h
+@@ -0,0 +1,39 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Interface for Surface ACPI Notify (SAN) driver.
++ *
++ * Provides access to discrete GPU notifications sent from ACPI via the SAN
++ * driver, which are not handled by this driver directly.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _LINUX_SURFACE_ACPI_NOTIFY_H
++#define _LINUX_SURFACE_ACPI_NOTIFY_H
++
++#include <linux/notifier.h>
++#include <linux/types.h>
++
++/**
++ * struct san_dgpu_event - Discrete GPU ACPI event.
++ * @category: Category of the event.
++ * @target:   Target ID of the event source.
++ * @command:  Command ID of the event.
++ * @instance: Instance ID of the event source.
++ * @length:   Length of the event's payload data (in bytes).
++ * @payload:  Pointer to the event's payload data.
++ */
++struct san_dgpu_event {
++	u8 category;
++	u8 target;
++	u8 command;
++	u8 instance;
++	u16 length;
++	u8 *payload;
++};
++
++int san_client_link(struct device *client);
++int san_dgpu_notifier_register(struct notifier_block *nb);
++int san_dgpu_notifier_unregister(struct notifier_block *nb);
++
++#endif /* _LINUX_SURFACE_ACPI_NOTIFY_H */
+diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
+new file mode 100644
+index 000000000000..447cda590409
+--- /dev/null
++++ b/include/linux/surface_aggregator/controller.h
+@@ -0,0 +1,815 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Surface System Aggregator Module (SSAM) controller interface.
++ *
++ * Main communication interface for the SSAM EC. Provides a controller
++ * managing access and communication to and from the SSAM EC, as well as main
++ * communication structures and definitions.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
++#define _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
++
++#include <linux/completion.h>
++#include <linux/device.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/serial_hub.h>
++
++
++/* -- Main data types and definitions --------------------------------------- */
++
++/**
++ * enum ssam_event_flags - Flags for enabling/disabling SAM events
++ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
++ */
++enum ssam_event_flags {
++	SSAM_EVENT_SEQUENCED = BIT(0),
++};
++
++/**
++ * struct ssam_event - SAM event sent from the EC to the host.
++ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
++ * @target_id:       Target ID of the event source.
++ * @command_id:      Command ID of the event.
++ * @instance_id:     Instance ID of the event source.
++ * @length:          Length of the event payload in bytes.
++ * @data:            Event payload data.
++ */
++struct ssam_event {
++	u8 target_category;
++	u8 target_id;
++	u8 command_id;
++	u8 instance_id;
++	u16 length;
++	u8 data[];
++};
++
++/**
++ * enum ssam_request_flags - Flags for SAM requests.
++ *
++ * @SSAM_REQUEST_HAS_RESPONSE:
++ *	Specifies that the request expects a response. If not set, the request
++ *	will be directly completed after its underlying packet has been
++ *	transmitted. If set, the request transport system waits for a response
++ *	of the request.
++ *
++ * @SSAM_REQUEST_UNSEQUENCED:
++ *	Specifies that the request should be transmitted via an unsequenced
++ *	packet. If set, the request must not have a response, meaning that this
++ *	flag and the %SSAM_REQUEST_HAS_RESPONSE flag are mutually exclusive.
++ */
++enum ssam_request_flags {
++	SSAM_REQUEST_HAS_RESPONSE = BIT(0),
++	SSAM_REQUEST_UNSEQUENCED  = BIT(1),
++};
++
++/**
++ * struct ssam_request - SAM request description.
++ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
++ * @target_id:       ID of the request's target.
++ * @command_id:      Command ID of the request.
++ * @instance_id:     Instance ID of the request's target.
++ * @flags:           Flags for the request. See &enum ssam_request_flags.
++ * @length:          Length of the request payload in bytes.
++ * @payload:         Request payload data.
++ *
++ * This struct fully describes a SAM request with payload. It is intended to
++ * help set up the actual transport struct, e.g. &struct ssam_request_sync,
++ * and specifically its raw message data via ssam_request_write_data().
++ */
++struct ssam_request {
++	u8 target_category;
++	u8 target_id;
++	u8 command_id;
++	u8 instance_id;
++	u16 flags;
++	u16 length;
++	const u8 *payload;
++};
++
++/**
++ * struct ssam_response - Response buffer for SAM request.
++ * @capacity: Capacity of the buffer, in bytes.
++ * @length:   Length of the actual data stored in the memory pointed to by
++ *            @pointer, in bytes. Set by the transport system.
++ * @pointer:  Pointer to the buffer's memory, storing the response payload data.
++ */
++struct ssam_response {
++	size_t capacity;
++	size_t length;
++	u8 *pointer;
++};
++
++struct ssam_controller;
++
++
++struct ssam_controller *ssam_get_controller(void);
++int ssam_client_link(struct ssam_controller *ctrl, struct device *client);
++int ssam_client_bind(struct device *client, struct ssam_controller **ctrl);
++
++struct device *ssam_controller_device(struct ssam_controller *c);
++
++struct ssam_controller *ssam_controller_get(struct ssam_controller *c);
++void ssam_controller_put(struct ssam_controller *c);
++
++void ssam_controller_statelock(struct ssam_controller *c);
++void ssam_controller_stateunlock(struct ssam_controller *c);
++
++ssize_t ssam_request_write_data(struct ssam_span *buf,
++				struct ssam_controller *ctrl,
++				const struct ssam_request *spec);
++
++
++/* -- Synchronous request interface. ---------------------------------------- */
++
++/**
++ * struct ssam_request_sync - Synchronous SAM request struct.
++ * @base:   Underlying SSH request.
++ * @comp:   Completion used to signal full completion of the request. After the
++ *          request has been submitted, this struct may only be modified or
++ *          deallocated after the completion has been signaled.
++ *
++ * @resp:   Buffer to store the response.
++ * @status: Status of the request, set after the base request has been
++ *          completed or has failed.
++ */
++struct ssam_request_sync {
++	struct ssh_request base;
++	struct completion comp;
++	struct ssam_response *resp;
++	int status;
++};
++
++int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
++			    struct ssam_request_sync **rqst,
++			    struct ssam_span *buffer);
++
++void ssam_request_sync_free(struct ssam_request_sync *rqst);
++
++void ssam_request_sync_init(struct ssam_request_sync *rqst,
++			    enum ssam_request_flags flags);
++
++/**
++ * ssam_request_sync_set_data - Set message data of a synchronous request.
++ * @rqst: The request.
++ * @ptr:  Pointer to the request message data.
++ * @len:  Length of the request message data.
++ *
++ * Set the request message data of a synchronous request. The provided buffer
++ * needs to live until the request has been completed.
++ */
++static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
++					      u8 *ptr, size_t len)
++{
++	ssh_request_set_data(&rqst->base, ptr, len);
++}
++
++/**
++ * ssam_request_sync_set_resp - Set response buffer of a synchronous request.
++ * @rqst: The request.
++ * @resp: The response buffer.
++ *
++ * Sets the response buffer of a synchronous request. This buffer will store
++ * the response of the request after it has been completed. May be %NULL if
++ * no response is expected.
++ */
++static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
++					      struct ssam_response *resp)
++{
++	rqst->resp = resp;
++}
++
++int ssam_request_sync_submit(struct ssam_controller *ctrl,
++			     struct ssam_request_sync *rqst);
++
++/**
++ * ssam_request_sync_wait - Wait for completion of a synchronous request.
++ * @rqst: The request to wait for.
++ *
++ * Wait for completion and release of a synchronous request. After this
++ * function terminates, the request is guaranteed to have left the transport
++ * system. After successful submission of a request, this function must be
++ * called before accessing the response of the request, freeing the request,
++ * or freeing any of the buffers associated with the request.
++ *
++ * This function must not be called if the request has not been submitted yet
++ * and may lead to a deadlock/infinite wait if a subsequent request submission
++ * fails in that case, due to the completion never triggering.
++ *
++ * Return: Returns the status of the given request, which is set on completion
++ * of the packet. This value is zero on success and negative on failure.
++ */
++static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
++{
++	wait_for_completion(&rqst->comp);
++	return rqst->status;
++}
++
++int ssam_request_sync(struct ssam_controller *ctrl,
++		      const struct ssam_request *spec,
++		      struct ssam_response *rsp);
++
++int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
++				  const struct ssam_request *spec,
++				  struct ssam_response *rsp,
++				  struct ssam_span *buf);
++
++
++/**
++ * ssam_request_sync_onstack - Execute a synchronous request on the stack.
++ * @ctrl: The controller via which the request is submitted.
++ * @rqst: The request specification.
++ * @rsp:  The response buffer.
++ * @payload_len: The (maximum) request payload length.
++ *
++ * Allocates a synchronous request with specified payload length on the stack,
++ * fully initializes it via the provided request specification, submits it, and
++ * finally waits for its completion before returning its status. This helper
++ * macro essentially allocates the request message buffer on the stack and
++ * then calls ssam_request_sync_with_buffer().
++ *
++ * Note: The @payload_len parameter specifies the maximum payload length, used
++ * for buffer allocation. The actual payload length may be smaller.
++ *
++ * Return: Returns the status of the request or any failure during setup, i.e.
++ * zero on success and a negative value on failure.
++ */
++#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len)			\
++	({									\
++		u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)];		\
++		struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) };	\
++										\
++		ssam_request_sync_with_buffer(ctrl, rqst, rsp, &__buf);		\
++	})
++
++/**
++ * ssam_retry - Retry request in case of I/O errors or timeouts.
++ * @request: The request function to execute. Must return an integer.
++ * @n:       Number of tries.
++ * @args:    Arguments for the request function.
++ *
++ * Executes the given request function, i.e. calls @request. In case the
++ * request returns %-EREMOTEIO (indicates I/O error) or -%ETIMEDOUT (request
++ * or underlying packet timed out), @request will be re-executed again, up to
++ * @n times in total.
++ *
++ * Return: Returns the return value of the last execution of @request.
++ */
++#define ssam_retry(request, n, args...)					\
++	({								\
++		int __i, __s = 0;					\
++									\
++		for (__i = (n); __i > 0; __i--) {			\
++			__s = request(args);				\
++			if (__s != -ETIMEDOUT && __s != -EREMOTEIO)	\
++				break;					\
++		}							\
++		__s;							\
++	})
++
++
++/**
++ * struct ssam_request_spec - Blue-print specification of SAM request.
++ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
++ * @target_id:       ID of the request's target.
++ * @command_id:      Command ID of the request.
++ * @instance_id:     Instance ID of the request's target.
++ * @flags:           Flags for the request. See &enum ssam_request_flags.
++ *
++ * Blue-print specification for a SAM request. This struct describes the
++ * unique static parameters of a request (i.e. type) without specifying any of
++ * its instance-specific data (e.g. payload). It is intended to be used as base
++ * for defining simple request functions via the
++ * ``SSAM_DEFINE_SYNC_REQUEST_x()`` family of macros.
++ */
++struct ssam_request_spec {
++	u8 target_category;
++	u8 target_id;
++	u8 command_id;
++	u8 instance_id;
++	u8 flags;
++};
++
++/**
++ * struct ssam_request_spec_md - Blue-print specification for multi-device SAM
++ * request.
++ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
++ * @command_id:      Command ID of the request.
++ * @flags:           Flags for the request. See &enum ssam_request_flags.
++ *
++ * Blue-print specification for a multi-device SAM request, i.e. a request
++ * that is applicable to multiple device instances, described by their
++ * individual target and instance IDs. This struct describes the unique static
++ * parameters of a request (i.e. type) without specifying any of its
++ * instance-specific data (e.g. payload) and without specifying any of its
++ * device specific IDs (i.e. target and instance ID). It is intended to be
++ * used as base for defining simple multi-device request functions via the
++ * ``SSAM_DEFINE_SYNC_REQUEST_MD_x()`` and ``SSAM_DEFINE_SYNC_REQUEST_CL_x()``
++ * families of macros.
++ */
++struct ssam_request_spec_md {
++	u8 target_category;
++	u8 command_id;
++	u8 flags;
++};
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_N() - Define synchronous SAM request function
++ * with neither argument nor return value.
++ * @name: Name of the generated function.
++ * @spec: Specification (&struct ssam_request_spec) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request having neither argument nor return value. The
++ * generated function takes care of setting up the request struct and buffer
++ * allocation, as well as execution of the request itself, returning once the
++ * request has been fully completed. The required transport buffer will be
++ * allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl)``, returning the status of the request, which is zero on success and
++ * negative on failure. The ``ctrl`` parameter is the controller via which the
++ * request is being sent.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...)				\
++	int name(struct ssam_controller *ctrl)					\
++	{									\
++		struct ssam_request_spec s = (struct ssam_request_spec)spec;	\
++		struct ssam_request rqst;					\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = s.target_id;					\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = s.instance_id;				\
++		rqst.flags = s.flags;						\
++		rqst.length = 0;						\
++		rqst.payload = NULL;						\
++										\
++		return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0);		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_W() - Define synchronous SAM request function with
++ * argument.
++ * @name:  Name of the generated function.
++ * @atype: Type of the request's argument.
++ * @spec:  Specification (&struct ssam_request_spec) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking an argument of type @atype and having no
++ * return value. The generated function takes care of setting up the request
++ * struct, buffer allocation, as well as execution of the request itself,
++ * returning once the request has been fully completed. The required transport
++ * buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, const atype *arg)``, returning the status of the request, which is
++ * zero on success and negative on failure. The ``ctrl`` parameter is the
++ * controller via which the request is sent. The request argument is specified
++ * via the ``arg`` pointer.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...)			\
++	int name(struct ssam_controller *ctrl, const atype *arg)		\
++	{									\
++		struct ssam_request_spec s = (struct ssam_request_spec)spec;	\
++		struct ssam_request rqst;					\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = s.target_id;					\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = s.instance_id;				\
++		rqst.flags = s.flags;						\
++		rqst.length = sizeof(atype);					\
++		rqst.payload = (u8 *)arg;					\
++										\
++		return ssam_request_sync_onstack(ctrl, &rqst, NULL,		\
++						 sizeof(atype));		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_R() - Define synchronous SAM request function with
++ * return value.
++ * @name:  Name of the generated function.
++ * @rtype: Type of the request's return value.
++ * @spec:  Specification (&struct ssam_request_spec) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking no argument but having a return value of
++ * type @rtype. The generated function takes care of setting up the request
++ * and response structs, buffer allocation, as well as execution of the
++ * request itself, returning once the request has been fully completed. The
++ * required transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, rtype *ret)``, returning the status of the request, which is zero on
++ * success and negative on failure. The ``ctrl`` parameter is the controller
++ * via which the request is sent. The request's return value is written to the
++ * memory pointed to by the ``ret`` parameter.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...)			\
++	int name(struct ssam_controller *ctrl, rtype *ret)			\
++	{									\
++		struct ssam_request_spec s = (struct ssam_request_spec)spec;	\
++		struct ssam_request rqst;					\
++		struct ssam_response rsp;					\
++		int status;							\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = s.target_id;					\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = s.instance_id;				\
++		rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE;		\
++		rqst.length = 0;						\
++		rqst.payload = NULL;						\
++										\
++		rsp.capacity = sizeof(rtype);					\
++		rsp.length = 0;							\
++		rsp.pointer = (u8 *)ret;					\
++										\
++		status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0);	\
++		if (status)							\
++			return status;						\
++										\
++		if (rsp.length != sizeof(rtype)) {				\
++			struct device *dev = ssam_controller_device(ctrl);	\
++			dev_err(dev, "rqst: invalid response length, expected "	\
++				"%zu, got %zu (tc: 0x%02x, cid: 0x%02x)",	\
++				sizeof(rtype), rsp.length, rqst.target_category,\
++				rqst.command_id);				\
++			return -EIO;						\
++		}								\
++										\
++		return 0;							\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM
++ * request function with neither argument nor return value.
++ * @name: Name of the generated function.
++ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request having neither argument nor return value. Device
++ * specifying parameters are not hard-coded, but instead must be provided to
++ * the function. The generated function takes care of setting up the request
++ * struct, buffer allocation, as well as execution of the request itself,
++ * returning once the request has been fully completed. The required transport
++ * buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, u8 tid, u8 iid)``, returning the status of the request, which is
++ * zero on success and negative on failure. The ``ctrl`` parameter is the
++ * controller via which the request is sent, ``tid`` the target ID for the
++ * request, and ``iid`` the instance ID.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...)				\
++	int name(struct ssam_controller *ctrl, u8 tid, u8 iid)			\
++	{									\
++		struct ssam_request_spec_md s					\
++			= (struct ssam_request_spec_md)spec;			\
++		struct ssam_request rqst;					\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = tid;						\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = iid;						\
++		rqst.flags = s.flags;						\
++		rqst.length = 0;						\
++		rqst.payload = NULL;						\
++										\
++		return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0);		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_MD_W() - Define synchronous multi-device SAM
++ * request function with argument.
++ * @name:  Name of the generated function.
++ * @atype: Type of the request's argument.
++ * @spec:  Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking an argument of type @atype and having no
++ * return value. Device specifying parameters are not hard-coded, but instead
++ * must be provided to the function. The generated function takes care of
++ * setting up the request struct, buffer allocation, as well as execution of
++ * the request itself, returning once the request has been fully completed.
++ * The required transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the status of the
++ * request, which is zero on success and negative on failure. The ``ctrl``
++ * parameter is the controller via which the request is sent, ``tid`` the
++ * target ID for the request, and ``iid`` the instance ID. The request argument
++ * is specified via the ``arg`` pointer.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...)			\
++	int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)\
++	{									\
++		struct ssam_request_spec_md s					\
++			= (struct ssam_request_spec_md)spec;			\
++		struct ssam_request rqst;					\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = tid;						\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = iid;						\
++		rqst.flags = s.flags;						\
++		rqst.length = sizeof(atype);					\
++		rqst.payload = (u8 *)arg;					\
++										\
++		return ssam_request_sync_onstack(ctrl, &rqst, NULL,		\
++						 sizeof(atype));		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_MD_R() - Define synchronous multi-device SAM
++ * request function with return value.
++ * @name:  Name of the generated function.
++ * @rtype: Type of the request's return value.
++ * @spec:  Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking no argument but having a return value of
++ * type @rtype. Device specifying parameters are not hard-coded, but instead
++ * must be provided to the function. The generated function takes care of
++ * setting up the request and response structs, buffer allocation, as well as
++ * execution of the request itself, returning once the request has been fully
++ * completed. The required transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_controller
++ * *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status of the request,
++ * which is zero on success and negative on failure. The ``ctrl`` parameter is
++ * the controller via which the request is sent, ``tid`` the target ID for the
++ * request, and ``iid`` the instance ID. The request's return value is written
++ * to the memory pointed to by the ``ret`` parameter.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...)			\
++	int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret)	\
++	{									\
++		struct ssam_request_spec_md s					\
++			= (struct ssam_request_spec_md)spec;			\
++		struct ssam_request rqst;					\
++		struct ssam_response rsp;					\
++		int status;							\
++										\
++		rqst.target_category = s.target_category;			\
++		rqst.target_id = tid;						\
++		rqst.command_id = s.command_id;					\
++		rqst.instance_id = iid;						\
++		rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE;		\
++		rqst.length = 0;						\
++		rqst.payload = NULL;						\
++										\
++		rsp.capacity = sizeof(rtype);					\
++		rsp.length = 0;							\
++		rsp.pointer = (u8 *)ret;					\
++										\
++		status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0);	\
++		if (status)							\
++			return status;						\
++										\
++		if (rsp.length != sizeof(rtype)) {				\
++			struct device *dev = ssam_controller_device(ctrl);	\
++			dev_err(dev, "rqst: invalid response length, expected "	\
++				"%zu, got %zu (tc: 0x%02x, cid: 0x%02x)",	\
++				sizeof(rtype), rsp.length, rqst.target_category,\
++				rqst.command_id);				\
++			return -EIO;						\
++		}								\
++										\
++		return 0;							\
++	}
++
++
++/* -- Event notifier/callbacks. --------------------------------------------- */
++
++#define SSAM_NOTIF_STATE_SHIFT		2
++#define SSAM_NOTIF_STATE_MASK		((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
++
++/**
++ * enum ssam_notif_flags - Flags used in return values from SSAM notifier
++ * callback functions.
++ *
++ * @SSAM_NOTIF_HANDLED:
++ *	Indicates that the notification has been handled. This flag should be
++ *	set by the handler if the handler can act/has acted upon the event
++ *	provided to it. This flag should not be set if the handler is not a
++ *	primary handler intended for the provided event.
++ *
++ *	If this flag has not been set by any handler after the notifier chain
++ *	has been traversed, a warning will be emitted, stating that the event
++ *	has not been handled.
++ *
++ * @SSAM_NOTIF_STOP:
++ *	Indicates that the notifier traversal should stop. If this flag is
++ *	returned from a notifier callback, notifier chain traversal will
++ *	immediately stop and any remaining notifiers will not be called. This
++ *	flag is automatically set when ssam_notifier_from_errno() is called
++ *	with a negative error value.
++ */
++enum ssam_notif_flags {
++	SSAM_NOTIF_HANDLED = BIT(0),
++	SSAM_NOTIF_STOP    = BIT(1),
++};
++
++
++struct ssam_event_notifier;
++
++typedef u32 (*ssam_notifier_fn_t)(struct ssam_event_notifier *nf,
++				  const struct ssam_event *event);
++
++/**
++ * struct ssam_notifier_block - Base notifier block for SSAM event
++ * notifications.
++ * @next:     The next notifier block in order of priority.
++ * @fn:       The callback function of this notifier. This function takes the
++ *            respective notifier block and event as input and should return
++ *            a notifier value, which can either be obtained from the flags
++ *            provided in &enum ssam_notif_flags, converted from a standard
++ *            error value via ssam_notifier_from_errno(), or a combination of
++ *            both (e.g. ``ssam_notifier_from_errno(e) | SSAM_NOTIF_HANDLED``).
++ * @priority: Priority value determining the order in which notifier callbacks
++ *            will be called. A higher value means higher priority, i.e. the
++ *            associated callback will be executed earlier than other (lower
++ *            priority) callbacks.
++ */
++struct ssam_notifier_block {
++	struct ssam_notifier_block __rcu *next;
++	ssam_notifier_fn_t fn;
++	int priority;
++};
++
++/**
++ * ssam_notifier_from_errno() - Convert standard error value to notifier
++ * return code.
++ * @err: The error code to convert, must be negative (in case of failure) or
++ *       zero (in case of success).
++ *
++ * Return: Returns the notifier return value obtained by converting the
++ * specified @err value. In case @err is negative, the %SSAM_NOTIF_STOP flag
++ * will be set, causing notifier call chain traversal to abort.
++ */
++static inline u32 ssam_notifier_from_errno(int err)
++{
++	if (WARN_ON(err > 0) || err == 0)
++		return 0;
++	else
++		return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
++}
++
++/**
++ * ssam_notifier_to_errno() - Convert notifier return code to standard error
++ * value.
++ * @ret: The notifier return value to convert.
++ *
++ * Return: Returns the negative error value encoded in @ret or zero if @ret
++ * indicates success.
++ */
++static inline int ssam_notifier_to_errno(u32 ret)
++{
++	return -(ret >> SSAM_NOTIF_STATE_SHIFT);
++}
++
++
++/* -- Event/notification registry. ------------------------------------------ */
++
++/**
++ * struct ssam_event_registry - Registry specification used for enabling events.
++ * @target_category: Target category for the event registry requests.
++ * @target_id:       Target ID for the event registry requests.
++ * @cid_enable:      Command ID for the event-enable request.
++ * @cid_disable:     Command ID for the event-disable request.
++ *
++ * This struct describes a SAM event registry via the minimal collection of
++ * SAM IDs specifying the requests to use for enabling and disabling an event.
++ * The individual event to be enabled/disabled itself is specified via &struct
++ * ssam_event_id.
++ */
++struct ssam_event_registry {
++	u8 target_category;
++	u8 target_id;
++	u8 cid_enable;
++	u8 cid_disable;
++};
++
++/**
++ * struct ssam_event_id - Unique event ID used for enabling events.
++ * @target_category: Target category of the event source.
++ * @instance:        Instance ID of the event source.
++ *
++ * This struct specifies the event to be enabled/disabled via an externally
++ * provided registry. It does not specify the registry to be used itself, this
++ * is done via &struct ssam_event_registry.
++ */
++struct ssam_event_id {
++	u8 target_category;
++	u8 instance;
++};
++
++/**
++ * enum ssam_event_mask - Flags specifying how events are matched to notifiers.
++ *
++ * @SSAM_EVENT_MASK_NONE:
++ *	Run the callback for any event with matching target category. Do not
++ *	do any additional filtering.
++ *
++ * @SSAM_EVENT_MASK_TARGET:
++ *	In addition to filtering by target category, only execute the notifier
++ *	callback for events with a target ID matching to the one of the
++ *	registry used for enabling/disabling the event.
++ *
++ * @SSAM_EVENT_MASK_INSTANCE:
++ *	In addition to filtering by target category, only execute the notifier
++ *	callback for events with an instance ID matching to the instance ID
++ *	used when enabling the event.
++ *
++ * @SSAM_EVENT_MASK_STRICT:
++ *	Do all the filtering above.
++ */
++enum ssam_event_mask {
++	SSAM_EVENT_MASK_TARGET   = BIT(0),
++	SSAM_EVENT_MASK_INSTANCE = BIT(1),
++
++	SSAM_EVENT_MASK_NONE = 0,
++	SSAM_EVENT_MASK_STRICT
++		= SSAM_EVENT_MASK_TARGET
++		| SSAM_EVENT_MASK_INSTANCE,
++};
++
++
++/**
++ * SSAM_EVENT_REGISTRY() - Define a new event registry.
++ * @tc:      Target category for the event registry requests.
++ * @tid:     Target ID for the event registry requests.
++ * @cid_en:  Command ID for the event-enable request.
++ * @cid_dis: Command ID for the event-disable request.
++ *
++ * Return: Returns the &struct ssam_event_registry specified by the given
++ * parameters.
++ */
++#define SSAM_EVENT_REGISTRY(tc, tid, cid_en, cid_dis)	\
++	((struct ssam_event_registry) {			\
++		.target_category = (tc),		\
++		.target_id = (tid),			\
++		.cid_enable = (cid_en),			\
++		.cid_disable = (cid_dis),		\
++	})
++
++#define SSAM_EVENT_REGISTRY_SAM	\
++	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
++
++#define SSAM_EVENT_REGISTRY_KIP	\
++	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
++
++#define SSAM_EVENT_REGISTRY_REG \
++	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
++
++
++/**
++ * struct ssam_event_notifier - Notifier block for SSAM events.
++ * @base:        The base notifier block with callback function and priority.
++ * @event:       The event for which this block will receive notifications.
++ * @event.reg:   Registry via which the event will be enabled/disabled.
++ * @event.id:    ID specifying the event.
++ * @event.mask:  Flags determining how events are matched to the notifier.
++ * @event.flags: Flags used for enabling the event.
++ */
++struct ssam_event_notifier {
++	struct ssam_notifier_block base;
++
++	struct {
++		struct ssam_event_registry reg;
++		struct ssam_event_id id;
++		enum ssam_event_mask mask;
++		u8 flags;
++	} event;
++};
++
++int ssam_notifier_register(struct ssam_controller *ctrl,
++			   struct ssam_event_notifier *n);
++
++int ssam_notifier_unregister(struct ssam_controller *ctrl,
++			     struct ssam_event_notifier *n);
++
++#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */
+diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
+new file mode 100644
+index 000000000000..64b1299d7bd8
+--- /dev/null
++++ b/include/linux/surface_aggregator/device.h
+@@ -0,0 +1,430 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Surface System Aggregator Module (SSAM) bus and client-device subsystem.
++ *
++ * Main interface for the surface-aggregator bus, surface-aggregator client
++ * devices, and respective drivers building on top of the SSAM controller.
++ * Provides support for non-platform/non-ACPI SSAM clients via dedicated
++ * subsystem.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H
++#define _LINUX_SURFACE_AGGREGATOR_DEVICE_H
++
++#include <linux/device.h>
++#include <linux/mod_devicetable.h>
++#include <linux/types.h>
++
++#include <linux/surface_aggregator/controller.h>
++
++
++/* -- Surface System Aggregator Module bus. --------------------------------- */
++
++/**
++ * enum ssam_device_domain - SAM device domain.
++ * @SSAM_DOMAIN_VIRTUAL:   Virtual device.
++ * @SSAM_DOMAIN_SERIALHUB: Physical device connected via Surface Serial Hub.
++ */
++enum ssam_device_domain {
++	SSAM_DOMAIN_VIRTUAL   = 0x00,
++	SSAM_DOMAIN_SERIALHUB = 0x01,
++};
++
++/**
++ * enum ssam_virtual_tc - Target categories for the virtual SAM domain.
++ * @SSAM_VIRTUAL_TC_HUB: Device hub category.
++ */
++enum ssam_virtual_tc {
++	SSAM_VIRTUAL_TC_HUB = 0x00,
++};
++
++/**
++ * struct ssam_device_uid - Unique identifier for SSAM device.
++ * @domain:   Domain of the device.
++ * @category: Target category of the device.
++ * @target:   Target ID of the device.
++ * @instance: Instance ID of the device.
++ * @function: Sub-function of the device. This field can be used to split a
++ *            single SAM device into multiple virtual subdevices to separate
++ *            different functionality of that device and allow one driver per
++ *            such functionality.
++ */
++struct ssam_device_uid {
++	u8 domain;
++	u8 category;
++	u8 target;
++	u8 instance;
++	u8 function;
++};
++
++/*
++ * Special values for device matching.
++ *
++ * These values are intended to be used with SSAM_DEVICE(), SSAM_VDEV(), and
++ * SSAM_SDEV() exclusively. Specifically, they are used to initialize the
++ * match_flags member of the device ID structure. Do not use them directly
++ * with struct ssam_device_id or struct ssam_device_uid.
++ */
++#define SSAM_ANY_TID		0xffff
++#define SSAM_ANY_IID		0xffff
++#define SSAM_ANY_FUN		0xffff
++
++/**
++ * SSAM_DEVICE() - Initialize a &struct ssam_device_id with the given
++ * parameters.
++ * @d:   Domain of the device.
++ * @cat: Target category of the device.
++ * @tid: Target ID of the device.
++ * @iid: Instance ID of the device.
++ * @fun: Sub-function of the device.
++ *
++ * Initializes a &struct ssam_device_id with the given parameters. See &struct
++ * ssam_device_uid for details regarding the parameters. The special values
++ * %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be used to specify that
++ * matching should ignore target ID, instance ID, and/or sub-function,
++ * respectively. This macro initializes the ``match_flags`` field based on the
++ * given parameters.
++ *
++ * Note: The parameters @d and @cat must be valid &u8 values, the parameters
++ * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
++ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
++ * allowed.
++ */
++#define SSAM_DEVICE(d, cat, tid, iid, fun)					\
++	.match_flags = (((tid) != SSAM_ANY_TID) ? SSAM_MATCH_TARGET : 0)	\
++		     | (((iid) != SSAM_ANY_IID) ? SSAM_MATCH_INSTANCE : 0)	\
++		     | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0),	\
++	.domain   = d,								\
++	.category = cat,							\
++	.target   = ((tid) != SSAM_ANY_TID) ? (tid) : 0,			\
++	.instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0,			\
++	.function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0				\
++
++/**
++ * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
++ * the given parameters.
++ * @cat: Target category of the device.
++ * @tid: Target ID of the device.
++ * @iid: Instance ID of the device.
++ * @fun: Sub-function of the device.
++ *
++ * Initializes a &struct ssam_device_id with the given parameters in the
++ * virtual domain. See &struct ssam_device_uid for details regarding the
++ * parameters. The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and
++ * %SSAM_ANY_FUN can be used to specify that matching should ignore target ID,
++ * instance ID, and/or sub-function, respectively. This macro initializes the
++ * ``match_flags`` field based on the given parameters.
++ *
++ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
++ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
++ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
++ * allowed.
++ */
++#define SSAM_VDEV(cat, tid, iid, fun) \
++	SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, tid, iid, fun)
++
++
++/**
++ * SSAM_SDEV() - Initialize a &struct ssam_device_id as physical SSH device
++ * with the given parameters.
++ * @cat: Target category of the device.
++ * @tid: Target ID of the device.
++ * @iid: Instance ID of the device.
++ * @fun: Sub-function of the device.
++ *
++ * Initializes a &struct ssam_device_id with the given parameters in the SSH
++ * domain. See &struct ssam_device_uid for details regarding the parameters.
++ * The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be
++ * used to specify that matching should ignore target ID, instance ID, and/or
++ * sub-function, respectively. This macro initializes the ``match_flags``
++ * field based on the given parameters.
++ *
++ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
++ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
++ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
++ * allowed.
++ */
++#define SSAM_SDEV(cat, tid, iid, fun) \
++	SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun)
++
++
++/**
++ * struct ssam_device - SSAM client device.
++ * @dev:  Driver model representation of the device.
++ * @ctrl: SSAM controller managing this device.
++ * @uid:  UID identifying the device.
++ */
++struct ssam_device {
++	struct device dev;
++	struct ssam_controller *ctrl;
++
++	struct ssam_device_uid uid;
++};
++
++/**
++ * struct ssam_device_driver - SSAM client device driver.
++ * @driver:      Base driver model structure.
++ * @match_table: Match table specifying which devices the driver should bind to.
++ * @probe:       Called when the driver is being bound to a device.
++ * @remove:      Called when the driver is being unbound from the device.
++ */
++struct ssam_device_driver {
++	struct device_driver driver;
++
++	const struct ssam_device_id *match_table;
++
++	int  (*probe)(struct ssam_device *sdev);
++	void (*remove)(struct ssam_device *sdev);
++};
++
++extern struct bus_type ssam_bus_type;
++extern const struct device_type ssam_device_type;
++
++
++/**
++ * is_ssam_device() - Check if the given device is a SSAM client device.
++ * @d: The device to test the type of.
++ *
++ * Return: Returns %true iff the specified device is of type &struct
++ * ssam_device, i.e. the device type points to %ssam_device_type, and %false
++ * otherwise.
++ */
++static inline bool is_ssam_device(struct device *d)
++{
++	return d->type == &ssam_device_type;
++}
++
++/**
++ * to_ssam_device() - Casts the given device to a SSAM client device.
++ * @d: The device to cast.
++ *
++ * Casts the given &struct device to a &struct ssam_device. The caller has to
++ * ensure that the given device is actually enclosed in a &struct ssam_device,
++ * e.g. by calling is_ssam_device().
++ *
++ * Return: Returns a pointer to the &struct ssam_device wrapping the given
++ * device @d.
++ */
++static inline struct ssam_device *to_ssam_device(struct device *d)
++{
++	return container_of(d, struct ssam_device, dev);
++}
++
++/**
++ * to_ssam_device_driver() - Casts the given device driver to a SSAM client
++ * device driver.
++ * @d: The driver to cast.
++ *
++ * Casts the given &struct device_driver to a &struct ssam_device_driver. The
++ * caller has to ensure that the given driver is actually enclosed in a
++ * &struct ssam_device_driver.
++ *
++ * Return: Returns the pointer to the &struct ssam_device_driver wrapping the
++ * given device driver @d.
++ */
++static inline
++struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d)
++{
++	return container_of(d, struct ssam_device_driver, driver);
++}
++
++
++const struct ssam_device_id *ssam_device_id_match(
++		const struct ssam_device_id *table,
++		const struct ssam_device_uid uid);
++
++const struct ssam_device_id *ssam_device_get_match(
++		const struct ssam_device *dev);
++
++const void *ssam_device_get_match_data(const struct ssam_device *dev);
++
++struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
++				      struct ssam_device_uid uid);
++
++int ssam_device_add(struct ssam_device *sdev);
++void ssam_device_remove(struct ssam_device *sdev);
++
++/**
++ * ssam_device_get() - Increment reference count of SSAM client device.
++ * @sdev: The device to increment the reference count of.
++ *
++ * Increments the reference count of the given SSAM client device by
++ * incrementing the reference count of the enclosed &struct device via
++ * get_device().
++ *
++ * See ssam_device_put() for the counter-part of this function.
++ *
++ * Return: Returns the device provided as input.
++ */
++static inline struct ssam_device *ssam_device_get(struct ssam_device *sdev)
++{
++	return sdev ? to_ssam_device(get_device(&sdev->dev)) : NULL;
++}
++
++/**
++ * ssam_device_put() - Decrement reference count of SSAM client device.
++ * @sdev: The device to decrement the reference count of.
++ *
++ * Decrements the reference count of the given SSAM client device by
++ * decrementing the reference count of the enclosed &struct device via
++ * put_device().
++ *
++ * See ssam_device_get() for the counter-part of this function.
++ */
++static inline void ssam_device_put(struct ssam_device *sdev)
++{
++	if (sdev)
++		put_device(&sdev->dev);
++}
++
++/**
++ * ssam_device_get_drvdata() - Get driver-data of SSAM client device.
++ * @sdev: The device to get the driver-data from.
++ *
++ * Return: Returns the driver-data of the given device, previously set via
++ * ssam_device_set_drvdata().
++ */
++static inline void *ssam_device_get_drvdata(struct ssam_device *sdev)
++{
++	return dev_get_drvdata(&sdev->dev);
++}
++
++/**
++ * ssam_device_set_drvdata() - Set driver-data of SSAM client device.
++ * @sdev: The device to set the driver-data of.
++ * @data: The data to set the device's driver-data pointer to.
++ */
++static inline void ssam_device_set_drvdata(struct ssam_device *sdev, void *data)
++{
++	dev_set_drvdata(&sdev->dev, data);
++}
++
++
++int __ssam_device_driver_register(struct ssam_device_driver *d, struct module *o);
++void ssam_device_driver_unregister(struct ssam_device_driver *d);
++
++/**
++ * ssam_device_driver_register() - Register a SSAM client device driver.
++ * @drv: The driver to register.
++ */
++#define ssam_device_driver_register(drv) \
++	__ssam_device_driver_register(drv, THIS_MODULE)
++
++/**
++ * module_ssam_device_driver() - Helper macro for SSAM device driver
++ * registration.
++ * @drv: The driver managed by this module.
++ *
++ * Helper macro to register a SSAM device driver via module_init() and
++ * module_exit(). This macro may only be used once per module and replaces
++ * the aforementioned definitions.
++ */
++#define module_ssam_device_driver(drv)			\
++	module_driver(drv, ssam_device_driver_register,	\
++		      ssam_device_driver_unregister)
++
++
++/* -- Helpers for client-device requests. ----------------------------------- */
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_CL_N() - Define synchronous client-device SAM
++ * request function with neither argument nor return value.
++ * @name: Name of the generated function.
++ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request having neither argument nor return value. Device
++ * specifying parameters are not hard-coded, but instead are provided via the
++ * client device, specifically its UID, supplied when calling this function.
++ * The generated function takes care of setting up the request struct, buffer
++ * allocation, as well as execution of the request itself, returning once the
++ * request has been fully completed. The required transport buffer will be
++ * allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_device *sdev)``,
++ * returning the status of the request, which is zero on success and negative
++ * on failure. The ``sdev`` parameter specifies both the target device of the
++ * request and by association the controller via which the request is sent.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...)			\
++	SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec)		\
++	int name(struct ssam_device *sdev)				\
++	{								\
++		return __raw_##name(sdev->ctrl, sdev->uid.target,	\
++				    sdev->uid.instance);		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_CL_W() - Define synchronous client-device SAM
++ * request function with argument.
++ * @name:  Name of the generated function.
++ * @atype: Type of the request's argument.
++ * @spec:  Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking an argument of type @atype and having no
++ * return value. Device specifying parameters are not hard-coded, but instead
++ * are provided via the client device, specifically its UID, supplied when
++ * calling this function. The generated function takes care of setting up the
++ * request struct, buffer allocation, as well as execution of the request
++ * itself, returning once the request has been fully completed. The required
++ * transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_device *sdev,
++ * const atype *arg)``, returning the status of the request, which is zero on
++ * success and negative on failure. The ``sdev`` parameter specifies both the
++ * target device of the request and by association the controller via which
++ * the request is sent. The request's argument is specified via the ``arg``
++ * pointer.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...)		\
++	SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec)	\
++	int name(struct ssam_device *sdev, const atype *arg)		\
++	{								\
++		return __raw_##name(sdev->ctrl, sdev->uid.target,	\
++				    sdev->uid.instance, arg);		\
++	}
++
++/**
++ * SSAM_DEFINE_SYNC_REQUEST_CL_R() - Define synchronous client-device SAM
++ * request function with return value.
++ * @name:  Name of the generated function.
++ * @rtype: Type of the request's return value.
++ * @spec:  Specification (&struct ssam_request_spec_md) defining the request.
++ *
++ * Defines a function executing the synchronous SAM request specified by
++ * @spec, with the request taking no argument but having a return value of
++ * type @rtype. Device specifying parameters are not hard-coded, but instead
++ * are provided via the client device, specifically its UID, supplied when
++ * calling this function. The generated function takes care of setting up the
++ * request struct, buffer allocation, as well as execution of the request
++ * itself, returning once the request has been fully completed. The required
++ * transport buffer will be allocated on the stack.
++ *
++ * The generated function is defined as ``int name(struct ssam_device *sdev,
++ * rtype *ret)``, returning the status of the request, which is zero on
++ * success and negative on failure. The ``sdev`` parameter specifies both the
++ * target device of the request and by association the controller via which
++ * the request is sent. The request's return value is written to the memory
++ * pointed to by the ``ret`` parameter.
++ *
++ * Refer to ssam_request_sync_onstack() for more details on the behavior of
++ * the generated function.
++ */
++#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...)		\
++	SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec)	\
++	int name(struct ssam_device *sdev, rtype *ret)			\
++	{								\
++		return __raw_##name(sdev->ctrl, sdev->uid.target,	\
++				    sdev->uid.instance, ret);		\
++	}
++
++#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */
+diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
+new file mode 100644
+index 000000000000..376313f402b2
+--- /dev/null
++++ b/include/linux/surface_aggregator/serial_hub.h
+@@ -0,0 +1,659 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Surface Serial Hub (SSH) protocol and communication interface.
++ *
++ * Lower-level communication layers and SSH protocol definitions for the
++ * Surface System Aggregator Module (SSAM). Provides the interface for basic
++ * packet- and request-based communication with the SSAM EC via SSH.
++ *
++ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
++#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
++
++#include <linux/crc-ccitt.h>
++#include <linux/kref.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/types.h>
++
++
++/* -- Data structures for SAM-over-SSH communication. ----------------------- */
++
++/**
++ * enum ssh_frame_type - Frame types for SSH frames.
++ *
++ * @SSH_FRAME_TYPE_DATA_SEQ:
++ *	Indicates a data frame, followed by a payload with the length specified
++ *	in the ``struct ssh_frame.len`` field. This frame is sequenced, meaning
++ *	that an ACK is required.
++ *
++ * @SSH_FRAME_TYPE_DATA_NSQ:
++ *	Same as %SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, meaning that the
++ *	message does not have to be ACKed.
++ *
++ * @SSH_FRAME_TYPE_ACK:
++ *	Indicates an ACK message.
++ *
++ * @SSH_FRAME_TYPE_NAK:
++ *	Indicates an error response for previously sent frame. In general, this
++ *	means that the frame and/or payload is malformed, e.g. a CRC is wrong.
++ *	For command-type payloads, this can also mean that the command is
++ *	invalid.
++ */
++enum ssh_frame_type {
++	SSH_FRAME_TYPE_DATA_SEQ = 0x80,
++	SSH_FRAME_TYPE_DATA_NSQ = 0x00,
++	SSH_FRAME_TYPE_ACK	= 0x40,
++	SSH_FRAME_TYPE_NAK	= 0x04,
++};
++
++/**
++ * struct ssh_frame - SSH communication frame.
++ * @type: The type of the frame. See &enum ssh_frame_type.
++ * @len:  The length of the frame payload directly following the CRC for this
++ *        frame. Does not include the final CRC for that payload.
++ * @seq:  The sequence number for this message/exchange.
++ */
++struct ssh_frame {
++	u8 type;
++	__le16 len;
++	u8 seq;
++} __packed;
++
++static_assert(sizeof(struct ssh_frame) == 4);
++
++/*
++ * SSH_FRAME_MAX_PAYLOAD_SIZE - Maximum SSH frame payload length in bytes.
++ *
++ * This is the physical maximum length of the protocol. Implementations may
++ * set a more constrained limit.
++ */
++#define SSH_FRAME_MAX_PAYLOAD_SIZE	U16_MAX
++
++/**
++ * enum ssh_payload_type - Type indicator for the SSH payload.
++ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
++ *                    payload.
++ */
++enum ssh_payload_type {
++	SSH_PLD_TYPE_CMD = 0x80,
++};
++
++/**
++ * struct ssh_command - Payload of a command-type frame.
++ * @type:    The type of the payload. See &enum ssh_payload_type. Should be
++ *           SSH_PLD_TYPE_CMD for this struct.
++ * @tc:      Command target category.
++ * @tid_out: Output target ID. Should be zero if this is an incoming (EC to host)
++ *           message.
++ * @tid_in:  Input target ID. Should be zero if this is an outgoing (host to
++ *           EC) message.
++ * @iid:     Instance ID.
++ * @rqid:    Request ID. Used to match requests with responses and differentiate
++ *           between responses and events.
++ * @cid:     Command ID.
++ */
++struct ssh_command {
++	u8 type;
++	u8 tc;
++	u8 tid_out;
++	u8 tid_in;
++	u8 iid;
++	__le16 rqid;
++	u8 cid;
++} __packed;
++
++static_assert(sizeof(struct ssh_command) == 8);
++
++/*
++ * SSH_COMMAND_MAX_PAYLOAD_SIZE - Maximum SSH command payload length in bytes.
++ *
++ * This is the physical maximum length of the protocol. Implementations may
++ * set a more constrained limit.
++ */
++#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
++	(SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
++
++/*
++ * SSH_MSG_LEN_BASE - Base-length of a SSH message.
++ *
++ * This is the minimum number of bytes required to form a message. The actual
++ * message length is SSH_MSG_LEN_BASE plus the length of the frame payload.
++ */
++#define SSH_MSG_LEN_BASE	(sizeof(struct ssh_frame) + 3ull * sizeof(u16))
++
++/*
++ * SSH_MSG_LEN_CTRL - Length of a SSH control message.
++ *
++ * This is the length of a SSH control message, which is equal to a SSH
++ * message without any payload.
++ */
++#define SSH_MSG_LEN_CTRL	SSH_MSG_LEN_BASE
++
++/**
++ * SSH_MESSAGE_LENGTH() - Compute length of SSH message.
++ * @payload_size: Length of the payload inside the SSH frame.
++ *
++ * Return: Returns the length of a SSH message with payload of specified size.
++ */
++#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + payload_size)
++
++/**
++ * SSH_COMMAND_MESSAGE_LENGTH() - Compute length of SSH command message.
++ * @payload_size: Length of the command payload.
++ *
++ * Return: Returns the length of a SSH command message with command payload of
++ * specified size.
++ */
++#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
++	SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + payload_size)
++
++/**
++ * SSH_MSGOFFSET_FRAME() - Compute offset in SSH message to specified field in
++ * frame.
++ * @field: The field for which the offset should be computed.
++ *
++ * Return: Returns the offset of the specified &struct ssh_frame field in the
++ * raw SSH message data.
++ */
++#define SSH_MSGOFFSET_FRAME(field) \
++	(sizeof(u16) + offsetof(struct ssh_frame, field))
++
++/**
++ * SSH_MSGOFFSET_COMMAND() - Compute offset in SSH message to specified field
++ * in command.
++ * @field: The field for which the offset should be computed.
++ *
++ * Return: Returns the offset of the specified &struct ssh_command field in
++ * the raw SSH message data.
++ */
++#define SSH_MSGOFFSET_COMMAND(field) \
++	(2ull * sizeof(u16) + sizeof(struct ssh_frame) \
++		+ offsetof(struct ssh_command, field))
++
++/*
++ * SSH_MSG_SYN - SSH message synchronization (SYN) bytes as u16.
++ */
++#define SSH_MSG_SYN		((u16)0x55aa)
++
++/**
++ * ssh_crc() - Compute CRC for SSH messages.
++ * @buf: The pointer pointing to the data for which the CRC should be computed.
++ * @len: The length of the data for which the CRC should be computed.
++ *
++ * Return: Returns the CRC computed on the provided data, as used for SSH
++ * messages.
++ */
++static inline u16 ssh_crc(const u8 *buf, size_t len)
++{
++	return crc_ccitt_false(0xffff, buf, len);
++}
++
++/*
++ * SSH_NUM_EVENTS - The number of reserved event IDs.
++ *
++ * The number of reserved event IDs, used for registering an SSH event
++ * handler. Valid event IDs are numbers below or equal to this value, with
++ * exception of zero, which is not an event ID. Thus, this is also the
++ * absolute maximum number of event handlers that can be registered.
++ */
++#define SSH_NUM_EVENTS		34
++
++/*
++ * SSH_NUM_TARGETS - The number of communication targets used in the protocol.
++ */
++#define SSH_NUM_TARGETS		2
++
++/**
++ * ssh_rqid_next_valid() - Return the next valid request ID.
++ * @rqid: The current request ID.
++ *
++ * Return: Returns the next valid request ID, following the current request ID
++ * provided to this function. This function skips any request IDs reserved for
++ * events.
++ */
++static inline u16 ssh_rqid_next_valid(u16 rqid)
++{
++	return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
++}
++
++/**
++ * ssh_rqid_to_event() - Convert request ID to its corresponding event ID.
++ * @rqid: The request ID to convert.
++ */
++static inline u16 ssh_rqid_to_event(u16 rqid)
++{
++	return rqid - 1u;
++}
++
++/**
++ * ssh_rqid_is_event() - Check if given request ID is a valid event ID.
++ * @rqid: The request ID to check.
++ */
++static inline bool ssh_rqid_is_event(u16 rqid)
++{
++	return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
++}
++
++/**
++ * ssh_tc_to_rqid() - Convert target category to its corresponding request ID.
++ * @tc: The target category to convert.
++ */
++static inline u16 ssh_tc_to_rqid(u8 tc)
++{
++	return tc;
++}
++
++/**
++ * ssh_tid_to_index() - Convert target ID to its corresponding target index.
++ * @tid: The target ID to convert.
++ */
++static inline u8 ssh_tid_to_index(u8 tid)
++{
++	return tid - 1u;
++}
++
++/**
++ * ssh_tid_is_valid() - Check if target ID is valid/supported.
++ * @tid: The target ID to check.
++ */
++static inline bool ssh_tid_is_valid(u8 tid)
++{
++	return ssh_tid_to_index(tid) < SSH_NUM_TARGETS;
++}
++
++/**
++ * struct ssam_span - Reference to a buffer region.
++ * @ptr: Pointer to the buffer region.
++ * @len: Length of the buffer region.
++ *
++ * A reference to a (non-owned) buffer segment, consisting of pointer and
++ * length. Use of this struct indicates non-owned data, i.e. data of which the
++ * life-time is managed (i.e. it is allocated/freed) via another pointer.
++ */
++struct ssam_span {
++	u8    *ptr;
++	size_t len;
++};
++
++enum ssam_ssh_tc {
++	/* Known SSH/EC target categories. */
++				// category 0x00 is invalid for EC use
++	SSAM_SSH_TC_SAM = 0x01,	// generic system functionality, real-time clock
++	SSAM_SSH_TC_BAT = 0x02,	// battery/power subsystem
++	SSAM_SSH_TC_TMP = 0x03,	// thermal subsystem
++	SSAM_SSH_TC_PMC = 0x04,
++	SSAM_SSH_TC_FAN = 0x05,
++	SSAM_SSH_TC_PoM = 0x06,
++	SSAM_SSH_TC_DBG = 0x07,
++	SSAM_SSH_TC_KBD = 0x08,	// legacy keyboard (Laptop 1/2)
++	SSAM_SSH_TC_FWU = 0x09,
++	SSAM_SSH_TC_UNI = 0x0a,
++	SSAM_SSH_TC_LPC = 0x0b,
++	SSAM_SSH_TC_TCL = 0x0c,
++	SSAM_SSH_TC_SFL = 0x0d,
++	SSAM_SSH_TC_KIP = 0x0e,
++	SSAM_SSH_TC_EXT = 0x0f,
++	SSAM_SSH_TC_BLD = 0x10,
++	SSAM_SSH_TC_BAS = 0x11,	// detachment system (Surface Book 2/3)
++	SSAM_SSH_TC_SEN = 0x12,
++	SSAM_SSH_TC_SRQ = 0x13,
++	SSAM_SSH_TC_MCU = 0x14,
++	SSAM_SSH_TC_HID = 0x15,	// generic HID input subsystem
++	SSAM_SSH_TC_TCH = 0x16,
++	SSAM_SSH_TC_BKL = 0x17,
++	SSAM_SSH_TC_TAM = 0x18,
++	SSAM_SSH_TC_ACC = 0x19,
++	SSAM_SSH_TC_UFI = 0x1a,
++	SSAM_SSH_TC_USC = 0x1b,
++	SSAM_SSH_TC_PEN = 0x1c,
++	SSAM_SSH_TC_VID = 0x1d,
++	SSAM_SSH_TC_AUD = 0x1e,
++	SSAM_SSH_TC_SMC = 0x1f,
++	SSAM_SSH_TC_KPD = 0x20,
++	SSAM_SSH_TC_REG = 0x21,
++};
++
++
++/* -- Packet transport layer (ptl). ----------------------------------------- */
++
++/**
++ * enum ssh_packet_base_priority - Base priorities for &struct ssh_packet.
++ * @SSH_PACKET_PRIORITY_FLUSH: Base priority for flush packets.
++ * @SSH_PACKET_PRIORITY_DATA:  Base priority for normal data packets.
++ * @SSH_PACKET_PRIORITY_NAK:   Base priority for NAK packets.
++ * @SSH_PACKET_PRIORITY_ACK:   Base priority for ACK packets.
++ */
++enum ssh_packet_base_priority {
++	SSH_PACKET_PRIORITY_FLUSH = 0,	/* same as DATA to sequence flush */
++	SSH_PACKET_PRIORITY_DATA  = 0,
++	SSH_PACKET_PRIORITY_NAK   = 1,
++	SSH_PACKET_PRIORITY_ACK   = 2,
++};
++
++/*
++ * Same as SSH_PACKET_PRIORITY() below, only with actual values.
++ */
++#define __SSH_PACKET_PRIORITY(base, try) \
++	(((base) << 4) | ((try) & 0x0f))
++
++/**
++ * SSH_PACKET_PRIORITY() - Compute packet priority from base priority and
++ * number of tries.
++ * @base: The base priority as suffix of &enum ssh_packet_base_priority, e.g.
++ *        ``FLUSH``, ``DATA``, ``ACK``, or ``NAK``.
++ * @try:  The number of tries (must be less than 16).
++ *
++ * Compute the combined packet priority. The combined priority is dominated by
++ * the base priority, whereas the number of (re-)tries decides the precedence
++ * of packets with the same base priority, giving higher priority to packets
++ * that already have more tries.
++ *
++ * Return: Returns the computed priority as value fitting inside a &u8. A
++ * higher number means a higher priority.
++ */
++#define SSH_PACKET_PRIORITY(base, try) \
++	__SSH_PACKET_PRIORITY(SSH_PACKET_PRIORITY_##base, (try))
++
++/**
++ * ssh_packet_priority_get_try() - Get number of tries from packet priority.
++ * @priority: The packet priority.
++ *
++ * Return: Returns the number of tries encoded in the specified packet
++ * priority.
++ */
++static inline u8 ssh_packet_priority_get_try(u8 priority)
++{
++	return priority & 0x0f;
++}
++
++/**
++ * ssh_packet_priority_get_base - Get base priority from packet priority.
++ * @priority: The packet priority.
++ *
++ * Return: Returns the base priority encoded in the given packet priority.
++ */
++static inline u8 ssh_packet_priority_get_base(u8 priority)
++{
++	return (priority & 0xf0) >> 4;
++}
++
++
++enum ssh_packet_flags {
++	/* state flags */
++	SSH_PACKET_SF_LOCKED_BIT,
++	SSH_PACKET_SF_QUEUED_BIT,
++	SSH_PACKET_SF_PENDING_BIT,
++	SSH_PACKET_SF_TRANSMITTING_BIT,
++	SSH_PACKET_SF_TRANSMITTED_BIT,
++	SSH_PACKET_SF_ACKED_BIT,
++	SSH_PACKET_SF_CANCELED_BIT,
++	SSH_PACKET_SF_COMPLETED_BIT,
++
++	/* type flags */
++	SSH_PACKET_TY_FLUSH_BIT,
++	SSH_PACKET_TY_SEQUENCED_BIT,
++	SSH_PACKET_TY_BLOCKING_BIT,
++
++	/* mask for state flags */
++	SSH_PACKET_FLAGS_SF_MASK =
++		  BIT(SSH_PACKET_SF_LOCKED_BIT)
++		| BIT(SSH_PACKET_SF_QUEUED_BIT)
++		| BIT(SSH_PACKET_SF_PENDING_BIT)
++		| BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
++		| BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
++		| BIT(SSH_PACKET_SF_ACKED_BIT)
++		| BIT(SSH_PACKET_SF_CANCELED_BIT)
++		| BIT(SSH_PACKET_SF_COMPLETED_BIT),
++
++	/* mask for type flags */
++	SSH_PACKET_FLAGS_TY_MASK =
++		  BIT(SSH_PACKET_TY_FLUSH_BIT)
++		| BIT(SSH_PACKET_TY_SEQUENCED_BIT)
++		| BIT(SSH_PACKET_TY_BLOCKING_BIT),
++};
++
++
++struct ssh_ptl;
++struct ssh_packet;
++
++/**
++ * struct ssh_packet_ops - Callback operations for a SSH packet.
++ * @release:  Function called when the packet reference count reaches zero.
++ *            This callback must be relied upon to ensure that the packet has
++ *            left the transport system(s).
++ * @complete: Function called when the packet is completed, either with
++ *            success or failure. In case of failure, the reason for the
++ *            failure is indicated by the value of the provided status code
++ *            argument. This value will be zero in case of success. Note that
++ *            a call to this callback does not guarantee that the packet is
++ *            not in use by the transport system any more.
++ */
++struct ssh_packet_ops {
++	void (*release)(struct ssh_packet *p);
++	void (*complete)(struct ssh_packet *p, int status);
++};
++
++/**
++ * struct ssh_packet - SSH transport packet.
++ * @ptl:      Pointer to the packet transport layer. May be %NULL if the packet
++ *            (or enclosing request) has not been submitted yet.
++ * @refcnt:   Reference count of the packet.
++ * @priority: Priority of the packet. Must be computed via
++ *            SSH_PACKET_PRIORITY().
++ * @data:     Raw message data.
++ * @data.len: Length of the raw message data.
++ * @data.ptr: Pointer to the raw message data buffer.
++ * @state:    State and type flags describing current packet state (dynamic)
++ *            and type (static). See &enum ssh_packet_flags for possible
++ *            options.
++ * @timestamp: Timestamp specifying when the latest transmission of a
++ *            currently pending packet has been started. May be %KTIME_MAX
++ *            before or in-between transmission attempts. Used for the packet
++ *            timeout implementation.
++ * @queue_node:	The list node for the packet queue.
++ * @pending_node: The list node for the set of pending packets.
++ * @ops:      Packet operations.
++ */
++struct ssh_packet {
++	struct ssh_ptl *ptl;
++	struct kref refcnt;
++
++	u8 priority;
++
++	struct {
++		size_t len;
++		u8 *ptr;
++	} data;
++
++	unsigned long state;
++	ktime_t timestamp;
++
++	struct list_head queue_node;
++	struct list_head pending_node;
++
++	const struct ssh_packet_ops *ops;
++};
++
++struct ssh_packet *ssh_packet_get(struct ssh_packet *p);
++void ssh_packet_put(struct ssh_packet *p);
++
++/**
++ * ssh_packet_set_data() - Set raw message data of packet.
++ * @p:   The packet for which the message data should be set.
++ * @ptr: Pointer to the memory holding the message data.
++ * @len: Length of the message data.
++ *
++ * Sets the raw message data buffer of the packet to the provided memory. The
++ * memory is not copied. Instead, the caller is responsible for management
++ * (i.e. allocation and deallocation) of the memory. The caller must ensure
++ * that the provided memory is valid and contains a valid SSH message,
++ * starting from the time of submission of the packet until the ``release``
++ * callback has been called. During this time, the memory may not be altered
++ * in any way.
++ */
++static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
++{
++	p->data.ptr = ptr;
++	p->data.len = len;
++}
++
++
++/* -- Request transport layer (rtl). ---------------------------------------- */
++
++enum ssh_request_flags {
++	/* state flags */
++	SSH_REQUEST_SF_LOCKED_BIT,
++	SSH_REQUEST_SF_QUEUED_BIT,
++	SSH_REQUEST_SF_PENDING_BIT,
++	SSH_REQUEST_SF_TRANSMITTING_BIT,
++	SSH_REQUEST_SF_TRANSMITTED_BIT,
++	SSH_REQUEST_SF_RSPRCVD_BIT,
++	SSH_REQUEST_SF_CANCELED_BIT,
++	SSH_REQUEST_SF_COMPLETED_BIT,
++
++	/* type flags */
++	SSH_REQUEST_TY_FLUSH_BIT,
++	SSH_REQUEST_TY_HAS_RESPONSE_BIT,
++
++	/* mask for state flags */
++	SSH_REQUEST_FLAGS_SF_MASK =
++		  BIT(SSH_REQUEST_SF_LOCKED_BIT)
++		| BIT(SSH_REQUEST_SF_QUEUED_BIT)
++		| BIT(SSH_REQUEST_SF_PENDING_BIT)
++		| BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
++		| BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
++		| BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
++		| BIT(SSH_REQUEST_SF_CANCELED_BIT)
++		| BIT(SSH_REQUEST_SF_COMPLETED_BIT),
++
++	/* mask for type flags */
++	SSH_REQUEST_FLAGS_TY_MASK =
++		  BIT(SSH_REQUEST_TY_FLUSH_BIT)
++		| BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
++};
++
++
++struct ssh_rtl;
++struct ssh_request;
++
++/**
++ * struct ssh_request_ops - Callback operations for a SSH request.
++ * @release:  Function called when the request's reference count reaches zero.
++ *            This callback must be relied upon to ensure that the request has
++ *            left the transport systems (both packet and request systems).
++ * @complete: Function called when the request is completed, either with
++ *            success or failure. The command data for the request response
++ *            is provided via the &struct ssh_command parameter (``cmd``),
++ *            the command payload of the request response via the &struct
++ *            ssam_span parameter (``data``).
++ *
++ *            If the request does not have any response or has not been
++ *            completed with success, both ``cmd`` and ``data`` parameters will
++ *            be NULL. If the request response does not have any command
++ *            payload, the ``data`` span will be an empty (zero-length) span.
++ *
++ *            In case of failure, the reason for the failure is indicated by
++ *            the value of the provided status code argument (``status``). This
++ *            value will be zero in case of success.
++ *
++ *            Note that a call to this callback does not guarantee that the
++ *            request is not in use by the transport systems any more.
++ */
++struct ssh_request_ops {
++	void (*release)(struct ssh_request *rqst);
++	void (*complete)(struct ssh_request *rqst,
++			 const struct ssh_command *cmd,
++			 const struct ssam_span *data, int status);
++};
++
++/**
++ * struct ssh_request - SSH transport request.
++ * @packet: The underlying SSH transport packet.
++ * @node:   List node for the request queue and pending set.
++ * @state:  State and type flags describing current request state (dynamic)
++ *          and type (static). See &enum ssh_request_flags for possible
++ *          options.
++ * @timestamp: Timestamp specifying when we start waiting on the response of the
++ *          request. This is set once the underlying packet has been completed
++ *          and may be %KTIME_MAX before that, or when the request does not
++ *          expect a response. Used for the request timeout implementation.
++ * @ops:    Request Operations.
++ */
++struct ssh_request {
++	struct ssh_packet packet;
++	struct list_head node;
++
++	unsigned long state;
++	ktime_t timestamp;
++
++	const struct ssh_request_ops *ops;
++};
++
++/**
++ * to_ssh_request() - Cast a SSH packet to its enclosing SSH request.
++ * @p: The packet to cast.
++ *
++ * Casts the given &struct ssh_packet to its enclosing &struct ssh_request.
++ * The caller is responsible for making sure that the packet is actually
++ * wrapped in a &struct ssh_request.
++ *
++ * Return: Returns the &struct ssh_request wrapping the provided packet.
++ */
++static inline struct ssh_request *to_ssh_request(struct ssh_packet *p)
++{
++	return container_of(p, struct ssh_request, packet);
++}
++
++/**
++ * ssh_request_get() - Increment reference count of request.
++ * @r: The request to increment the reference count of.
++ *
++ * Increments the reference count of the given request by incrementing the
++ * reference count of the underlying &struct ssh_packet, enclosed in it.
++ *
++ * See also ssh_request_put(), ssh_packet_get().
++ *
++ * Return: Returns the request provided as input.
++ */
++static inline struct ssh_request *ssh_request_get(struct ssh_request *r)
++{
++	return r ? to_ssh_request(ssh_packet_get(&r->packet)) : NULL;
++}
++
++/**
++ * ssh_request_put() - Decrement reference count of request.
++ * @r: The request to decrement the reference count of.
++ *
++ * Decrements the reference count of the given request by decrementing the
++ * reference count of the underlying &struct ssh_packet, enclosed in it. If
++ * the reference count reaches zero, the ``release`` callback specified in the
++ * request's &struct ssh_request_ops, i.e. ``r->ops->release``, will be
++ * called.
++ *
++ * See also ssh_request_get(), ssh_packet_put().
++ */
++static inline void ssh_request_put(struct ssh_request *r)
++{
++	if (r)
++		ssh_packet_put(&r->packet);
++}
++
++/**
++ * ssh_request_set_data() - Set raw message data of request.
++ * @r:   The request for which the message data should be set.
++ * @ptr: Pointer to the memory holding the message data.
++ * @len: Length of the message data.
++ *
++ * Sets the raw message data buffer of the underlying packet to the specified
++ * buffer. Does not copy the actual message data, just sets the buffer pointer
++ * and length. Refer to ssh_packet_set_data() for more details.
++ */
++static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
++{
++	ssh_packet_set_data(&r->packet, ptr, len);
++}
++
++#endif /* _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H */
+diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h
+new file mode 100644
+index 000000000000..1a8bc0249f8e
+--- /dev/null
++++ b/include/uapi/linux/surface_aggregator/cdev.h
+@@ -0,0 +1,58 @@
++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
++/*
++ * Surface System Aggregator Module (SSAM) user-space EC interface.
++ *
++ * Definitions, structs, and IOCTLs for the /dev/surface/aggregator misc
++ * device. This device provides direct user-space access to the SSAM EC.
++ * Intended for debugging and development.
++ *
++ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
++#define _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++/**
++ * struct ssam_cdev_request - Controller request IOCTL argument.
++ * @target_category: Target category of the SAM request.
++ * @target_id:       Target ID of the SAM request.
++ * @command_id:      Command ID of the SAM request.
++ * @instance_id:     Instance ID of the SAM request.
++ * @flags:           SAM Request flags.
++ * @status:          Request status (output).
++ * @payload:         Request payload (input data).
++ * @payload.data:    Pointer to request payload data.
++ * @payload.length:  Length of request payload data (in bytes).
++ * @response:        Request response (output data).
++ * @response.data:   Pointer to response buffer.
++ * @response.length: On input: Capacity of response buffer (in bytes).
++ *                   On output: Length of request response (number of bytes
++ *                   in the buffer that are actually used).
++ */
++struct ssam_cdev_request {
++	__u8 target_category;
++	__u8 target_id;
++	__u8 command_id;
++	__u8 instance_id;
++	__u16 flags;
++	__s16 status;
++
++	struct {
++		__u64 data;
++		__u16 length;
++		__u8 __pad[6];
++	} payload;
++
++	struct {
++		__u64 data;
++		__u16 length;
++		__u8 __pad[6];
++	} response;
++} __attribute__((__packed__));
++
++#define SSAM_CDEV_REQUEST	_IOWR(0xA5, 1, struct ssam_cdev_request)
++
++#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */
+diff --git a/include/uapi/linux/surface_aggregator/dtx.h b/include/uapi/linux/surface_aggregator/dtx.h
+new file mode 100644
+index 000000000000..d88cabfb8dd7
+--- /dev/null
++++ b/include/uapi/linux/surface_aggregator/dtx.h
+@@ -0,0 +1,150 @@
++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
++/*
++ * Surface DTX (clipboard detachment system driver) user-space interface.
++ *
++ * Definitions, structs, and IOCTLs for the /dev/surface/dtx misc device. This
++ * device allows user-space to control the clipboard detachment process on
++ * Surface Book series devices.
++ *
++ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
++ */
++
++#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
++#define _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++
++/* Status/error categories */
++#define SDTX_CATEGORY_STATUS		0x0000
++#define SDTX_CATEGORY_RUNTIME_ERROR	0x1000
++#define SDTX_CATEGORY_HARDWARE_ERROR	0x2000
++#define SDTX_CATEGORY_UNKNOWN		0xf000
++
++#define SDTX_CATEGORY_MASK		0xf000
++#define SDTX_CATEGORY(value)		((value) & SDTX_CATEGORY_MASK)
++
++#define SDTX_STATUS(code)		((code) | SDTX_CATEGORY_STATUS)
++#define SDTX_ERR_RT(code)		((code) | SDTX_CATEGORY_RUNTIME_ERROR)
++#define SDTX_ERR_HW(code)		((code) | SDTX_CATEGORY_HARDWARE_ERROR)
++#define SDTX_UNKNOWN(code)		((code) | SDTX_CATEGORY_UNKNOWN)
++
++#define SDTX_SUCCESS(value)	(SDTX_CATEGORY(value) == SDTX_CATEGORY_STATUS)
++
++/* Latch status values */
++#define SDTX_LATCH_CLOSED		SDTX_STATUS(0x00)
++#define SDTX_LATCH_OPENED		SDTX_STATUS(0x01)
++
++/* Base state values */
++#define SDTX_BASE_DETACHED		SDTX_STATUS(0x00)
++#define SDTX_BASE_ATTACHED		SDTX_STATUS(0x01)
++
++/* Runtime errors (non-critical) */
++#define SDTX_DETACH_NOT_FEASIBLE	SDTX_ERR_RT(0x01)
++#define SDTX_DETACH_TIMEDOUT		SDTX_ERR_RT(0x02)
++
++/* Hardware errors (critical) */
++#define SDTX_ERR_FAILED_TO_OPEN		SDTX_ERR_HW(0x01)
++#define SDTX_ERR_FAILED_TO_REMAIN_OPEN	SDTX_ERR_HW(0x02)
++#define SDTX_ERR_FAILED_TO_CLOSE	SDTX_ERR_HW(0x03)
++
++
++/* Base types */
++#define SDTX_DEVICE_TYPE_HID		0x0100
++#define SDTX_DEVICE_TYPE_SSH		0x0200
++
++#define SDTX_DEVICE_TYPE_MASK		0x0f00
++#define SDTX_DEVICE_TYPE(value)		((value) & SDTX_DEVICE_TYPE_MASK)
++
++#define SDTX_BASE_TYPE_HID(id)		((id) | SDTX_DEVICE_TYPE_HID)
++#define SDTX_BASE_TYPE_SSH(id)		((id) | SDTX_DEVICE_TYPE_SSH)
++
++
++/**
++ * enum sdtx_device_mode - Mode describing how (and if) the clipboard is
++ * attached to the base of the device.
++ * @SDTX_DEVICE_MODE_TABLET: The clipboard is detached from the base and the
++ *                           device operates as tablet.
++ * @SDTX_DEVICE_MODE_LAPTOP: The clipboard is attached normally to the base
++ *                           and the device operates as laptop.
++ * @SDTX_DEVICE_MODE_STUDIO: The clipboard is attached to the base in reverse.
++ *                           The device operates as tablet with keyboard and
++ *                           touchpad deactivated, however, the base battery
++ *                           and, if present in the specific device model, dGPU
++ *                           are available to the system.
++ */
++enum sdtx_device_mode {
++	SDTX_DEVICE_MODE_TABLET		= 0x00,
++	SDTX_DEVICE_MODE_LAPTOP		= 0x01,
++	SDTX_DEVICE_MODE_STUDIO		= 0x02,
++};
++
++/**
++ * struct sdtx_event - Event provided by reading from the DTX device file.
++ * @length: Length of the event payload, in bytes.
++ * @code:   Event code, detailing what type of event this is.
++ * @data:   Payload of the event, containing @length bytes.
++ *
++ * See &enum sdtx_event_code for currently valid event codes.
++ */
++struct sdtx_event {
++	__u16 length;
++	__u16 code;
++	__u8 data[];
++} __packed;
++
++/**
++ * enum sdtx_event_code - Code describing the type of an event.
++ * @SDTX_EVENT_REQUEST:         Detachment request event type.
++ * @SDTX_EVENT_CANCEL:          Cancel detachment process event type.
++ * @SDTX_EVENT_BASE_CONNECTION: Base/clipboard connection change event type.
++ * @SDTX_EVENT_LATCH_STATUS:    Latch status change event type.
++ * @SDTX_EVENT_DEVICE_MODE:     Device mode change event type.
++ *
++ * Used in @struct sdtx_event to describe the type of the event. Further event
++ * codes are reserved for future use. Any event parser should be able to
++ * gracefully handle unknown events, i.e. by simply skipping them.
++ *
++ * Consult the DTX user-space interface documentation for details regarding
++ * the individual event types.
++ */
++enum sdtx_event_code {
++	SDTX_EVENT_REQUEST		= 1,
++	SDTX_EVENT_CANCEL		= 2,
++	SDTX_EVENT_BASE_CONNECTION	= 3,
++	SDTX_EVENT_LATCH_STATUS		= 4,
++	SDTX_EVENT_DEVICE_MODE		= 5,
++};
++
++/**
++ * struct sdtx_base_info - Describes if and what type of base is connected.
++ * @state:   The state of the connection. Valid values are %SDTX_BASE_DETACHED,
++ *           %SDTX_BASE_ATTACHED, and %SDTX_DETACH_NOT_FEASIBLE (in case a base
++ *           is attached but low clipboard battery prevents detachment). Other
++ *           values are currently reserved.
++ * @base_id: The type of base connected. Zero if no base is connected.
++ */
++struct sdtx_base_info {
++	__u16 state;
++	__u16 base_id;
++} __packed;
++
++
++/* IOCTLs */
++#define SDTX_IOCTL_EVENTS_ENABLE	_IO(0xa5, 0x21)
++#define SDTX_IOCTL_EVENTS_DISABLE	_IO(0xa5, 0x22)
++
++#define SDTX_IOCTL_LATCH_LOCK		_IO(0xa5, 0x23)
++#define SDTX_IOCTL_LATCH_UNLOCK		_IO(0xa5, 0x24)
++
++#define SDTX_IOCTL_LATCH_REQUEST	_IO(0xa5, 0x25)
++#define SDTX_IOCTL_LATCH_CONFIRM	_IO(0xa5, 0x26)
++#define SDTX_IOCTL_LATCH_HEARTBEAT	_IO(0xa5, 0x27)
++#define SDTX_IOCTL_LATCH_CANCEL		_IO(0xa5, 0x28)
++
++#define SDTX_IOCTL_GET_BASE_INFO	_IOR(0xa5, 0x29, struct sdtx_base_info)
++#define SDTX_IOCTL_GET_DEVICE_MODE	_IOR(0xa5, 0x2a, __u16)
++#define SDTX_IOCTL_GET_LATCH_STATUS	_IOR(0xa5, 0x2b, __u16)
++
++#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H */
+diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
+index bcff122d0dc8..4339377ad929 100644
+--- a/scripts/mod/devicetable-offsets.c
++++ b/scripts/mod/devicetable-offsets.c
+@@ -245,8 +245,9 @@ int main(void)
+ 
+ 	DEVID(ssam_device_id);
+ 	DEVID_FIELD(ssam_device_id, match_flags);
++	DEVID_FIELD(ssam_device_id, domain);
+ 	DEVID_FIELD(ssam_device_id, category);
+-	DEVID_FIELD(ssam_device_id, channel);
++	DEVID_FIELD(ssam_device_id, target);
+ 	DEVID_FIELD(ssam_device_id, instance);
+ 	DEVID_FIELD(ssam_device_id, function);
+ 
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index a6c583362b92..5b79fdc42641 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1368,20 +1368,22 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
+ 	return 1;
+ }
+ 
+-/* Looks like: ssam:cNtNiNfN
++/*
++ * Looks like: ssam:dNcNtNiNfN
+  *
+  * N is exactly 2 digits, where each is an upper-case hex digit.
+  */
+ static int do_ssam_entry(const char *filename, void *symval, char *alias)
+ {
+ 	DEF_FIELD(symval, ssam_device_id, match_flags);
++	DEF_FIELD(symval, ssam_device_id, domain);
+ 	DEF_FIELD(symval, ssam_device_id, category);
+-	DEF_FIELD(symval, ssam_device_id, channel);
++	DEF_FIELD(symval, ssam_device_id, target);
+ 	DEF_FIELD(symval, ssam_device_id, instance);
+ 	DEF_FIELD(symval, ssam_device_id, function);
+ 
+-	sprintf(alias, "ssam:c%02X", category);
+-	ADD(alias, "t", match_flags & SSAM_MATCH_CHANNEL, channel);
++	sprintf(alias, "ssam:d%02Xc%02X", domain, category);
++	ADD(alias, "t", match_flags & SSAM_MATCH_TARGET, target);
+ 	ADD(alias, "i", match_flags & SSAM_MATCH_INSTANCE, instance);
+ 	ADD(alias, "f", match_flags & SSAM_MATCH_FUNCTION, function);
+ 
+-- 
+2.28.0
+

+ 70 - 0
patches/5.9/0007-i2c-core-Restore-acpi_walk_dep_device_list-getting-c.patch

@@ -0,0 +1,70 @@
+From 93bb0042f8df962612190d85ffa178477833af51 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Wed, 14 Oct 2020 16:41:58 +0200
+Subject: [PATCH] i2c: core: Restore acpi_walk_dep_device_list() getting called
+ after registering the ACPI i2c devs
+
+Commit 21653a4181ff ("i2c: core: Call i2c_acpi_install_space_handler()
+before i2c_acpi_register_devices()")'s intention was to only move the
+acpi_install_address_space_handler() call to the point before where
+the ACPI declared i2c-children of the adapter where instantiated by
+i2c_acpi_register_devices().
+
+But i2c_acpi_install_space_handler() had a call to
+acpi_walk_dep_device_list() hidden (that is I missed it) at the end
+of it, so as an unwanted side-effect now acpi_walk_dep_device_list()
+was also being called before i2c_acpi_register_devices().
+
+Move the acpi_walk_dep_device_list() call to the end of
+i2c_acpi_register_devices(), so that it is once again called *after*
+the i2c_client-s hanging of the adapter have been created.
+
+This fixes the Microsoft Surface Go 2 hanging at boot.
+
+Fixes: 21653a4181ff ("i2c: core: Call i2c_acpi_install_space_handler() before i2c_acpi_register_devices()")
+Suggested-by: Maximilian Luz <luzmaximilian@gmail.com>
+Reported-and-tested-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/i2c/i2c-core-acpi.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index 8820131da748..aed579942436 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
+ void i2c_acpi_register_devices(struct i2c_adapter *adap)
+ {
+ 	acpi_status status;
++	acpi_handle handle;
+ 
+ 	if (!has_acpi_companion(&adap->dev))
+ 		return;
+@@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
+ 				     adap, NULL);
+ 	if (ACPI_FAILURE(status))
+ 		dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
++
++	if (!adap->dev.parent)
++		return;
++
++	handle = ACPI_HANDLE(adap->dev.parent);
++	if (!handle)
++		return;
++
++	acpi_walk_dep_device_list(handle);
+ }
+ 
+ static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+@@ -754,7 +764,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
+ 		return -ENOMEM;
+ 	}
+ 
+-	acpi_walk_dep_device_list(handle);
+ 	return 0;
+ }
+ 
+-- 
+2.28.0
+