Remove Per File Key based hardware crypto framework

Remove the Per-File-Key (PFK) logic based inline crypto support
from the file encryption framework.

Change-Id: I90071562ba5c41b9db470363edac35c9fe5e4efa
Signed-off-by: Neeraj Soni <neersoni@codeaurora.org>
Neeraj Soni 2020-08-07 20:11:12 +05:30 committed by Gerrit - the friendly Code Review server
parent 7d0fd9e6c3
commit 1924eafba6
110 changed files with 108 additions and 6142 deletions
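
For context: the PFK framework threaded per-file crypto state through the block layer by hanging a key pointer and a data unit number (DUN) on each bio, which the diffs below strip out. A minimal, self-contained C model of that state and of the deleted bio_clone_crypt_key() helper — the field names are taken from the diffs, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the PFK fields removed from struct bio:
 * bi_dun, bi_crypt_key and bi_crypt_skip all disappear with CONFIG_PFK. */
struct bio_model {
    uint64_t    bi_sector;     /* 512-byte sector, unrelated to PFK  */
    uint64_t    bi_dun;        /* data unit number for inline crypto */
    const void *bi_crypt_key;  /* per-file key, NULL = none          */
    int         bi_crypt_skip; /* bypass encryption for this bio     */
};

/* Mirrors the deleted bio_clone_crypt_key() in block/bio.c: clones had
 * to inherit the crypto annotations along with the data pointers. */
static void clone_crypt_key(struct bio_model *dst, const struct bio_model *src)
{
    dst->bi_dun = src->bi_dun;
    dst->bi_crypt_key = src->bi_crypt_key;
    dst->bi_crypt_skip = src->bi_crypt_skip;
}

int main(void)
{
    struct bio_model src = { 2048, 257, "key", 0 }, dst = { 2048, 0, 0, 0 };

    clone_crypt_key(&dst, &src);
    printf("cloned dun=%llu\n", (unsigned long long)dst.bi_dun);
    return 0;
}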

View File

@ -266,11 +266,9 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@ -480,7 +478,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_RTC_CLASS=y
@ -639,7 +636,6 @@ CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -653,4 +649,3 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y

View File

@ -272,12 +272,10 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@ -494,7 +492,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_EDAC=y
@ -714,7 +711,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -729,6 +725,5 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_CRC8=y
CONFIG_XZ_DEC=y

View File

@ -265,11 +265,9 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@ -491,7 +489,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_HAPTICS=y
@ -652,7 +649,6 @@ CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -666,4 +662,3 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y

View File

@ -273,12 +273,10 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@ -505,7 +503,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_HAPTICS=y
@ -732,7 +729,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -747,5 +743,4 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_XZ_DEC=y

View File

@ -280,11 +280,9 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -509,7 +507,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -677,8 +674,6 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -720,7 +715,6 @@ CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -735,7 +729,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -290,12 +290,10 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -524,7 +522,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -707,8 +704,6 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -805,7 +800,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -821,7 +815,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -289,7 +289,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
@ -647,8 +646,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@ -722,7 +719,6 @@ CONFIG_TEST_USER_COPY=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -738,7 +734,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -280,7 +280,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_UEVENT=y
@ -628,8 +627,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@ -652,7 +649,6 @@ CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_IPC_LOGGING=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -667,7 +663,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -291,7 +291,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
@ -660,8 +659,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@ -752,7 +749,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -768,7 +764,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -408,7 +408,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -550,5 +549,4 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_STACK_HASH_ORDER_SHIFT=12

View File

@ -422,7 +422,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -600,4 +599,3 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y

View File

@ -408,7 +408,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -551,5 +550,4 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_STACK_HASH_ORDER_SHIFT=12

View File

@ -421,7 +421,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -600,4 +599,3 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y

View File

@ -265,7 +265,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@ -426,7 +425,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -563,8 +561,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
@ -599,7 +595,6 @@ CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@ -615,7 +610,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -473,8 +473,6 @@ CONFIG_ANDROID_BINDER_IPC=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -497,9 +495,6 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_PANIC_TIMEOUT=-1
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_PFK=y
CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y
CONFIG_PFK_VIRTUALIZED=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y

View File

@ -484,8 +484,6 @@ CONFIG_ANDROID_BINDER_IPC=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -541,9 +539,6 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_TEST_USER_COPY=m
CONFIG_MEMTEST=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_PFK=y
CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y
CONFIG_PFK_VIRTUALIZED=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y

View File

@ -353,7 +353,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -495,5 +494,4 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_STACK_HASH_ORDER_SHIFT=12

View File

@ -354,7 +354,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -496,5 +495,4 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_STACK_HASH_ORDER_SHIFT=12

View File

@ -363,7 +363,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -504,5 +503,4 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_STACK_HASH_ORDER_SHIFT=12

View File

@ -364,7 +364,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -505,5 +504,4 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_STACK_HASH_ORDER_SHIFT=12

View File

@ -278,7 +278,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_UEVENT=y
@ -609,8 +608,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@ -633,7 +630,6 @@ CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_IPC_LOGGING=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -648,7 +644,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -290,7 +290,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
@ -643,8 +642,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@ -731,7 +728,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -747,7 +743,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -279,13 +279,11 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@ -508,7 +506,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QTI_TRI_LED=y
@ -637,8 +634,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -675,7 +670,6 @@ CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -691,7 +685,6 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y

View File

@ -287,14 +287,12 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@ -522,7 +520,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QTI_TRI_LED=y
@ -668,8 +665,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -764,7 +759,6 @@ CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -781,7 +775,6 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y

View File

@ -285,7 +285,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
@ -636,8 +635,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@ -716,7 +713,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -730,7 +726,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -280,7 +280,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@ -491,7 +490,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -641,8 +639,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -669,7 +665,6 @@ CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_IPC_LOGGING=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -684,7 +679,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -292,7 +292,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
@ -513,7 +512,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -679,8 +677,6 @@ CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -779,7 +775,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -795,7 +790,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -274,11 +274,9 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -495,7 +493,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -655,8 +652,6 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -698,7 +693,6 @@ CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -713,7 +707,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -284,12 +284,10 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -519,7 +517,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@ -692,8 +689,6 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -790,7 +785,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -806,7 +800,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -285,11 +285,9 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -663,8 +661,6 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -707,8 +703,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -723,7 +717,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -297,12 +297,10 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -693,8 +691,6 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_EXT4_FS_ENCRYPTION=y
CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
@ -791,8 +787,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_PFK_WRAPPED_KEY_SUPPORTED=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -808,7 +802,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -279,11 +279,9 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -510,7 +508,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_HAPTICS=y
@ -683,7 +680,6 @@ CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -698,7 +694,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -289,12 +289,10 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -525,7 +523,6 @@ CONFIG_MMC_CLKGATE=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_HAPTICS=y
@ -768,7 +765,6 @@ CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@ -784,7 +780,6 @@ CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y

View File

@ -577,18 +577,6 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
}
EXPORT_SYMBOL(bio_phys_segments);
static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
{
#ifdef CONFIG_PFK
dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
#ifdef CONFIG_DM_DEFAULT_KEY
dst->bi_crypt_key = src->bi_crypt_key;
dst->bi_crypt_skip = src->bi_crypt_skip;
#endif
dst->bi_dio_inode = src->bi_dio_inode;
#endif
}
/**
* __bio_clone_fast - clone a bio that shares the original bio's biovec
* @bio: destination bio
@ -617,7 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
bio_clone_crypt_key(bio, bio_src);
bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
@ -726,7 +714,6 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
}
}
bio_clone_crypt_key(bio, bio_src);
bio_clone_blkcg_association(bio, bio_src);
return bio;

View File

@ -1462,9 +1462,6 @@ static struct request *blk_old_get_request(struct request_queue *q,
/* q->queue_lock is unlocked at this point */
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
#ifdef CONFIG_PFK
rq->__dun = 0;
#endif
rq->bio = rq->biotail = NULL;
return rq;
}
@ -1688,9 +1685,6 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
bio->bi_next = req->bio;
req->bio = bio;
#ifdef CONFIG_PFK
req->__dun = bio->bi_iter.bi_dun;
#endif
req->__sector = bio->bi_iter.bi_sector;
req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
@ -1840,9 +1834,6 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
else
req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
req->write_hint = bio->bi_write_hint;
#ifdef CONFIG_PFK
req->__dun = bio->bi_iter.bi_dun;
#endif
blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
@ -2876,13 +2867,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
if (!blk_rq_is_passthrough(req)) {
if (!blk_rq_is_passthrough(req))
req->__sector += total_bytes >> 9;
#ifdef CONFIG_PFK
if (req->__dun)
req->__dun += total_bytes >> 12;
#endif
}
/* mixed attributes always follow the first bio */
if (req->rq_flags & RQF_MIXED_MERGE) {
@ -3245,9 +3231,6 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
#ifdef CONFIG_PFK
dst->__dun = blk_rq_dun(src);
#endif
dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
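
The blk_update_request() hunk above shows why the DUN needed its own bookkeeping: sectors are 512 bytes (shift by 9) while the ICE data unit is 4 KiB (shift by 12), so a partial completion advanced the two counters at different rates. A self-contained sketch of that arithmetic, assuming the 4 KiB data unit size implied by the shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t sector = 0, dun = 1;   /* dun == 0 meant "no DUN set"    */
    uint32_t total_bytes = 8192;    /* bytes completed on the request */

    sector += total_bytes >> 9;     /* 8192 / 512  = 16 sectors       */
    if (dun)
        dun += total_bytes >> 12;   /* 8192 / 4096 = 2 data units     */

    printf("sector=%llu dun=%llu\n", /* prints sector=16 dun=3 */
           (unsigned long long)sector, (unsigned long long)dun);
    return 0;
}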

View File

@ -9,7 +9,7 @@
#include <linux/scatterlist.h>
#include <trace/events/block.h>
#include <linux/pfk.h>
#include "blk.h"
static struct bio *blk_bio_discard_split(struct request_queue *q,
@ -509,8 +509,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
if (blk_integrity_rq(req) &&
integrity_req_gap_back_merge(req, bio))
return 0;
if (blk_try_merge(req, bio) != ELEVATOR_BACK_MERGE)
return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
req_set_nomerge(q, req);
@ -533,8 +531,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
if (blk_integrity_rq(req) &&
integrity_req_gap_front_merge(req, bio))
return 0;
if (blk_try_merge(req, bio) != ELEVATOR_FRONT_MERGE)
return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
req_set_nomerge(q, req);
@ -668,11 +664,6 @@ static void blk_account_io_merge(struct request *req)
}
}
static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
{
return (!pfk_allow_merge_bio(bio, nxt));
}
/*
* For non-mq, this has to be called with the request spinlock acquired.
* For mq with scheduling, the appropriate queue wide lock should be held.
@ -711,9 +702,6 @@ static struct request *attempt_merge(struct request_queue *q,
if (req->write_hint != next->write_hint)
return NULL;
if (crypto_not_mergeable(req->bio, next->bio))
return 0;
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@ -851,18 +839,11 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
if (req_op(rq) == REQ_OP_DISCARD &&
queue_max_discard_segments(rq->q) > 1) {
queue_max_discard_segments(rq->q) > 1)
return ELEVATOR_DISCARD_MERGE;
} else if (blk_rq_pos(rq) + blk_rq_sectors(rq) ==
bio->bi_iter.bi_sector) {
if (crypto_not_mergeable(rq->bio, bio))
return ELEVATOR_NO_MERGE;
else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
} else if (blk_rq_pos(rq) - bio_sectors(bio) ==
bio->bi_iter.bi_sector) {
if (crypto_not_mergeable(bio, rq->bio))
return ELEVATOR_NO_MERGE;
else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
}
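
The deleted crypto_not_mergeable()/pfk_allow_merge_bio() pair guarded request merging: two bios could only merge when their crypto contexts were compatible. The exact policy lived in the (also deleted) PFK driver; the sketch below assumes the simplest plausible rule — same key, contiguous DUNs at 8 sectors per 4 KiB data unit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bio_model {
    const void *key;     /* per-file key, NULL = unencrypted */
    uint64_t    dun;     /* data unit number of first block  */
    uint32_t    sectors; /* length in 512-byte sectors       */
};

static bool allow_merge(const struct bio_model *a, const struct bio_model *b)
{
    if (a->key != b->key)
        return false;                 /* different keys never merge */
    if (!a->key)
        return true;                  /* neither bio is encrypted   */
    return b->dun == a->dun + a->sectors / 8;   /* DUNs contiguous? */
}

int main(void)
{
    static const char key[] = "k";
    struct bio_model a = { key, 100, 16 }, b = { key, 102, 8 };

    printf("mergeable: %d\n", allow_merge(&a, &b));  /* prints 1 */
    return 0;
}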

View File

@ -443,7 +443,7 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
{
struct elevator_queue *e = q->elevator;
struct request *__rq;
enum elv_merge ret;
/*
* Levels of merges:
* nomerges: No merges at all attempted
@ -456,11 +456,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
/*
* First try one-hit cache.
*/
if (q->last_merge) {
if (!elv_bio_merge_ok(q->last_merge, bio))
return ELEVATOR_NO_MERGE;
if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
enum elv_merge ret = blk_try_merge(q->last_merge, bio);
ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
return ret;

View File

@ -15,7 +15,9 @@
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#ifdef CONFIG_PFK
#include <linux/pfk.h>
#endif
#include <crypto/ice.h>
#define PART_BITS 4

View File

@ -771,8 +771,4 @@ config CRYPTO_DEV_ARTPEC6
To compile this driver as a module, choose M here.
if ARCH_QCOM
source drivers/crypto/msm/Kconfig
endif
endif # CRYPTO_HW

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1443,9 +1443,9 @@ static void qcom_ice_debug(struct platform_device *pdev)
qcom_ice_dump_test_bus(ice_dev);
pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n",
ice_dev->ice_instance_type,
(unsigned long long)ice_dev->ice_reset_start_time,
(unsigned long long)ice_dev->ice_reset_complete_time);
ice_dev->ice_instance_type,
(unsigned long long)ice_dev->ice_reset_start_time.tv64,
(unsigned long long)ice_dev->ice_reset_complete_time.tv64);
if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time,
ice_dev->ice_reset_start_time)) > 0)

View File

@ -286,24 +286,6 @@ config DM_CRYPT
If unsure, say N.
config DM_DEFAULT_KEY
tristate "Default-key crypt target support"
depends on BLK_DEV_DM
depends on PFK
---help---
This (currently Android-specific) device-mapper target allows you to
create a device that assigns a default encryption key to bios that
don't already have one. This can sit between inline cryptographic
acceleration hardware and filesystems that use it. This ensures that
where the filesystem doesn't explicitly specify a key, such as for
filesystem metadata, a default key will be used instead, leaving no
sectors unencrypted.
To compile this code as a module, choose M here: the module will be
called dm-default-key.
If unsure, say N.
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM

View File

@ -43,7 +43,6 @@ obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DEFAULT_KEY) += dm-default-key.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o

View File

@ -126,7 +126,7 @@ struct iv_tcw_private {
*/
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
DM_CRYPT_ENCRYPT_OVERRIDE };
};
enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
@ -2678,8 +2678,6 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
} else if (!strcasecmp(opt_string, "iv_large_sectors"))
set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
else if (!strcasecmp(opt_string, "allow_encrypt_override"))
set_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags);
else {
ti->error = "Invalid feature arguments";
return -EINVAL;
@ -2889,15 +2887,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
struct crypt_config *cc = ti->private;
/*
* If bio is REQ_PREFLUSH, REQ_NOENCRYPT, or REQ_OP_DISCARD,
* just bypass crypt queues.
* If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
* - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
* - for REQ_OP_DISCARD caller must use flush if IO ordering matters
*/
if (unlikely(bio->bi_opf & REQ_PREFLUSH) ||
(unlikely(bio->bi_opf & REQ_NOENCRYPT) &&
test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) ||
bio_op(bio) == REQ_OP_DISCARD) {
if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
bio_op(bio) == REQ_OP_DISCARD)) {
bio_set_dev(bio, cc->dev->bdev);
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
@ -2984,8 +2979,6 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
num_feature_args += test_bit(DM_CRYPT_ENCRYPT_OVERRIDE,
&cc->flags);
if (cc->on_disk_tag_size)
num_feature_args++;
if (num_feature_args) {
@ -3002,8 +2995,6 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" sector_size:%d", cc->sector_size);
if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
DMEMIT(" iv_large_sectors");
if (test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags))
DMEMIT(" allow_encrypt_override");
}
break;
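
The removed allow_encrypt_override feature let a dm-crypt table opt in to having REQ_NOENCRYPT bios bypass the crypt queues entirely (the hardware already encrypts them inline). A self-contained model of the deleted crypt_map() bypass condition — the flag bit values here are made up for illustration, only the boolean logic mirrors the hunk above:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit values; the real REQ_* flags live in blk_types.h. */
#define REQ_PREFLUSH  (1u << 0)
#define REQ_NOENCRYPT (1u << 1)  /* flag also removed by this series */

static bool bypass_crypt(unsigned int bi_opf, bool op_discard,
                         bool encrypt_override)
{
    return (bi_opf & REQ_PREFLUSH) ||
           ((bi_opf & REQ_NOENCRYPT) && encrypt_override) ||
           op_discard;
}

int main(void)
{
    printf("%d\n", bypass_crypt(REQ_NOENCRYPT, false, true));  /* 1 */
    printf("%d\n", bypass_crypt(REQ_NOENCRYPT, false, false)); /* 0 */
    return 0;
}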

View File

@ -1,306 +0,0 @@
/*
* Copyright (C) 2017 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/pfk.h>
#define DM_MSG_PREFIX "default-key"
#define DEFAULT_DUN_OFFSET 1
struct default_key_c {
struct dm_dev *dev;
sector_t start;
struct blk_encryption_key key;
bool set_dun;
u64 dun_offset;
};
static void default_key_dtr(struct dm_target *ti)
{
struct default_key_c *dkc = ti->private;
if (dkc->dev)
dm_put_device(ti, dkc->dev);
kzfree(dkc);
}
static int default_key_ctr_optional(struct dm_target *ti,
unsigned int argc, char **argv)
{
struct default_key_c *dkc = ti->private;
struct dm_arg_set as = {0};
static const struct dm_arg _args[] = {
{0, 2, "Invalid number of feature args"},
};
unsigned int opt_params;
const char *opt_string;
char dummy;
int ret;
as.argc = argc;
as.argv = argv;
ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
if (ret)
return ret;
while (opt_params--) {
opt_string = dm_shift_arg(&as);
if (!opt_string) {
ti->error = "Not enough feature arguments";
return -EINVAL;
}
if (!strcasecmp(opt_string, "set_dun")) {
dkc->set_dun = true;
} else if (sscanf(opt_string, "dun_offset:%llu%c",
&dkc->dun_offset, &dummy) == 1) {
if (dkc->dun_offset == 0) {
ti->error = "dun_offset cannot be 0";
return -EINVAL;
}
} else {
ti->error = "Invalid feature arguments";
return -EINVAL;
}
}
if (dkc->dun_offset && !dkc->set_dun) {
ti->error = "Invalid: dun_offset without set_dun";
return -EINVAL;
}
if (dkc->set_dun && !dkc->dun_offset)
dkc->dun_offset = DEFAULT_DUN_OFFSET;
return 0;
}
/*
* Construct a default-key mapping: <mode> <key> <dev_path> <start>
*/
static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct default_key_c *dkc;
size_t key_size;
unsigned long long tmp;
char dummy;
int err;
if (argc < 4) {
ti->error = "Too few arguments";
return -EINVAL;
}
dkc = kzalloc(sizeof(*dkc), GFP_KERNEL);
if (!dkc) {
ti->error = "Out of memory";
return -ENOMEM;
}
ti->private = dkc;
if (strcmp(argv[0], "AES-256-XTS") != 0) {
ti->error = "Unsupported encryption mode";
err = -EINVAL;
goto bad;
}
key_size = strlen(argv[1]);
if (key_size != 2 * BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS) {
ti->error = "Unsupported key size";
err = -EINVAL;
goto bad;
}
key_size /= 2;
if (hex2bin(dkc->key.raw, argv[1], key_size) != 0) {
ti->error = "Malformed key string";
err = -EINVAL;
goto bad;
}
err = dm_get_device(ti, argv[2], dm_table_get_mode(ti->table),
&dkc->dev);
if (err) {
ti->error = "Device lookup failed";
goto bad;
}
if (sscanf(argv[3], "%llu%c", &tmp, &dummy) != 1) {
ti->error = "Invalid start sector";
err = -EINVAL;
goto bad;
}
dkc->start = tmp;
if (argc > 4) {
err = default_key_ctr_optional(ti, argc - 4, &argv[4]);
if (err)
goto bad;
}
if (!blk_queue_inlinecrypt(bdev_get_queue(dkc->dev->bdev))) {
ti->error = "Device does not support inline encryption";
err = -EINVAL;
goto bad;
}
/* Pass flush requests through to the underlying device. */
ti->num_flush_bios = 1;
/*
* We pass discard requests through to the underlying device, although
* the discarded blocks will be zeroed, which leaks information about
* unused blocks. It's also impossible for dm-default-key to know not
* to decrypt discarded blocks, so they will not be read back as zeroes
* and we must set discard_zeroes_data_unsupported.
*/
ti->num_discard_bios = 1;
/*
* It's unclear whether WRITE_SAME would work with inline encryption; it
* would depend on whether the hardware duplicates the data before or
* after encryption. But since the internal storage in some devices
* (MSM8998-based) doesn't claim to support WRITE_SAME anyway, we don't
* currently have a way to test it. Leave it disabled for now.
*/
/*ti->num_write_same_bios = 1;*/
return 0;
bad:
default_key_dtr(ti);
return err;
}
static int default_key_map(struct dm_target *ti, struct bio *bio)
{
const struct default_key_c *dkc = ti->private;
bio_set_dev(bio, dkc->dev->bdev);
if (bio_sectors(bio)) {
bio->bi_iter.bi_sector = dkc->start +
dm_target_offset(ti, bio->bi_iter.bi_sector);
}
if (!bio->bi_crypt_key && !bio->bi_crypt_skip) {
bio->bi_crypt_key = &dkc->key;
if (dkc->set_dun)
bio_dun(bio) = (dm_target_offset(ti,
bio->bi_iter.bi_sector)
>> 3) + dkc->dun_offset;
}
return DM_MAPIO_REMAPPED;
}
static void default_key_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result,
unsigned int maxlen)
{
const struct default_key_c *dkc = ti->private;
unsigned int sz = 0;
int num_feature_args = 0;
switch (type) {
case STATUSTYPE_INFO:
result[0] = '\0';
break;
case STATUSTYPE_TABLE:
/* encryption mode */
DMEMIT("AES-256-XTS");
/* reserved for key; dm-crypt shows it, but we don't for now */
DMEMIT(" -");
/* name of underlying device, and the start sector in it */
DMEMIT(" %s %llu", dkc->dev->name,
(unsigned long long)dkc->start);
num_feature_args += dkc->set_dun;
num_feature_args += dkc->set_dun
&& dkc->dun_offset != DEFAULT_DUN_OFFSET;
if (num_feature_args) {
DMEMIT(" %d", num_feature_args);
if (dkc->set_dun)
DMEMIT(" set_dun");
if (dkc->set_dun
&& dkc->dun_offset != DEFAULT_DUN_OFFSET)
DMEMIT(" dun_offset:%llu", dkc->dun_offset);
}
break;
}
}
static int default_key_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
{
struct default_key_c *dkc = ti->private;
struct dm_dev *dev = dkc->dev;
*bdev = dev->bdev;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (dkc->start ||
ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
return 1;
return 0;
}
static int default_key_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn,
void *data)
{
struct default_key_c *dkc = ti->private;
return fn(ti, dkc->dev, dkc->start, ti->len, data);
}
static struct target_type default_key_target = {
.name = "default-key",
.version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = default_key_ctr,
.dtr = default_key_dtr,
.map = default_key_map,
.status = default_key_status,
.prepare_ioctl = default_key_prepare_ioctl,
.iterate_devices = default_key_iterate_devices,
};
static int __init dm_default_key_init(void)
{
return dm_register_target(&default_key_target);
}
static void __exit dm_default_key_exit(void)
{
dm_unregister_target(&default_key_target);
}
module_init(dm_default_key_init);
module_exit(dm_default_key_exit);
MODULE_AUTHOR("Paul Lawrence <paullawrence@google.com>");
MODULE_AUTHOR("Paul Crowley <paulcrowley@google.com>");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata");
MODULE_LICENSE("GPL v2");
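
The DUN handed to the hardware by the deleted default_key_map() is just the 4 KiB block index within the target — the sector shifted right by 3, since one data unit spans 8 sectors — plus the configured dun_offset (DEFAULT_DUN_OFFSET, i.e. 1, when set_dun is given alone). A tiny worked example of that computation:

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_DUN_OFFSET 1

int main(void)
{
    uint64_t target_sector = 80;   /* sector offset within the target */
    uint64_t dun = (target_sector >> 3) + DEFAULT_DUN_OFFSET;

    printf("dun=%llu\n", (unsigned long long)dun);  /* prints dun=11 */
    return 0;
}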

View File

@ -1689,16 +1689,6 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}
static int queue_supports_inline_encryption(struct dm_target *ti,
struct dm_dev *dev,
sector_t start, sector_t len,
void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
return q && blk_queue_inlinecrypt(q);
}
static bool dm_table_all_devices_attribute(struct dm_table *t,
iterate_devices_callout_fn func)
{
@ -1879,11 +1869,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
if (dm_table_all_devices_attribute(t, queue_supports_inline_encryption))
queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
else
queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q);
dm_table_verify_integrity(t);
/*

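The removed dm-table hunk propagated QUEUE_FLAG_INLINECRYPT conservatively: dm_table_all_devices_attribute() set the flag on the mapped device only if every underlying device's queue reported inline-crypt support, and cleared it otherwise. A minimal model of that all-or-nothing check:

#include <stdbool.h>
#include <stdio.h>

static bool all_devices_inlinecrypt(const bool *devs, int n)
{
    for (int i = 0; i < n; i++)
        if (!devs[i])
            return false;  /* one non-capable device disables it */
    return true;
}

int main(void)
{
    bool devs[] = { true, true, false };

    printf("set INLINECRYPT: %d\n",
           all_devices_inlinecrypt(devs, 3));  /* prints 0 */
    return 0;
}
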
View File

@ -52,7 +52,6 @@
#include <linux/ion_kernel.h>
#include <linux/compat.h>
#include "compat_qseecom.h"
#include <linux/pfk.h>
#include <linux/kthread.h>
#define QSEECOM_DEV "qseecom"
@ -8115,19 +8114,6 @@ static long qseecom_ioctl(struct file *file,
qcom_ice_set_fde_flag(ice_data.flag);
break;
}
case QSEECOM_IOCTL_FBE_CLEAR_KEY: {
struct qseecom_ice_key_data_t key_data;
ret = copy_from_user(&key_data, argp, sizeof(key_data));
if (ret) {
pr_err("copy from user failed\n");
return -EFAULT;
}
pfk_fbe_clear_key((const unsigned char *) key_data.key,
key_data.key_len, (const unsigned char *)
key_data.salt, key_data.salt_len);
break;
}
default:
pr_err("Invalid IOCTL: 0x%x\n", cmd);
return -EINVAL;

View File

@ -216,8 +216,6 @@ void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
host->max_req_size / 512));
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
blk_queue_max_segments(mq->queue, host->max_segs);
if (host->inlinecrypt_support)
queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
@ -481,8 +479,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
if (host->inlinecrypt_support)
queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue);
sema_init(&mq->thread_sem, 1);

View File

@ -439,17 +439,6 @@ config MMC_SDHCI_MSM
If unsure, say N.
config MMC_SDHCI_MSM_ICE
bool "Qualcomm Technologies, Inc Inline Crypto Engine for SDHCI core"
depends on MMC_SDHCI_MSM && CRYPTO_DEV_QCOM_ICE
help
This selects the QTI specific additions to support Inline Crypto
Engine (ICE). ICE accelerates the crypto operations and maintains
the high SDHCI performance.
Select this if you have ICE supported for SDHCI on QTI chipset.
If unsure, say N.
config MMC_MXC
tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
depends on ARCH_MXC || PPC_MPC512x

View File

@ -86,7 +86,6 @@ obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_MSM_ICE) += sdhci-msm-ice.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2017, 2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -376,7 +376,6 @@ static int cmdq_enable(struct mmc_host *mmc)
{
int err = 0;
u32 cqcfg;
u32 cqcap = 0;
bool dcmd_enable;
struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
@ -405,24 +404,6 @@ static int cmdq_enable(struct mmc_host *mmc)
cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) |
(dcmd_enable ? CQ_DCMD : 0));
cqcap = cmdq_readl(cq_host, CQCAP);
if (cqcap & CQCAP_CS) {
/*
* If the host controller supports cryptographic operations,
* it uses a 128-bit task descriptor. The upper 64 bits of the
* task descriptor are then used to pass crypto-specific information.
*/
cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
CMDQ_TASK_DESC_SZ_128;
cqcfg |= CQ_ICE_ENABLE;
/*
* For SDHC v5.0 onwards, ICE 3.0 specific registers are added
* in CQ register space, due to which few CQ registers are
* shifted. Set offset_changed boolean to use updated address.
*/
cq_host->offset_changed = true;
}
cmdq_writel(cq_host, cqcfg, CQCFG);
/* enable CQ_HOST */
cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE,
@ -738,30 +719,6 @@ static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,
upper_32_bits(*task_desc));
}
static inline
void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, u64 *task_desc,
u64 ice_ctx)
{
u64 *ice_desc = NULL;
if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) {
/*
* Get the address of ice context for the given task descriptor.
* ice context is present in the upper 64bits of task descriptor
* ice_context_base_address = task_desc + 8 bytes
*/
ice_desc = (__le64 *)((u8 *)task_desc +
CQ_TASK_DESC_TASK_PARAMS_SIZE);
memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE);
/*
* Assign the upper 64 bits of the task descriptor with the ice context
*/
if (ice_ctx)
*ice_desc = cpu_to_le64(ice_ctx);
}
}
static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@ -785,7 +742,6 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
u32 tag = mrq->cmdq_req->tag;
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
struct sdhci_host *host = mmc_priv(mmc);
u64 ice_ctx = 0;
if (!cq_host->enabled) {
pr_err("%s: CMDQ host not enabled yet !!!\n",
@ -804,31 +760,19 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
goto ring_doorbell;
}
if (cq_host->ops->crypto_cfg) {
err = cq_host->ops->crypto_cfg(mmc, mrq, tag, &ice_ctx);
if (err) {
mmc->err_stats[MMC_ERR_ICE_CFG]++;
pr_err("%s: failed to configure crypto: err %d tag %d\n",
mmc_hostname(mmc), err, tag);
goto ice_err;
}
}
task_desc = (__le64 __force *)get_desc(cq_host, tag);
cmdq_prep_task_desc(mrq, &data, 1,
(mrq->cmdq_req->cmdq_req_flags & QBR));
*task_desc = cpu_to_le64(data);
cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx);
cmdq_log_task_desc_history(cq_host, *task_desc, false);
err = cmdq_prep_tran_desc(mrq, cq_host, tag);
if (err) {
pr_err("%s: %s: failed to setup tx desc: %d\n",
mmc_hostname(mmc), __func__, err);
goto desc_err;
goto out;
}
cq_host->mrq_slot[tag] = mrq;
@ -848,20 +792,6 @@ ring_doorbell:
/* Commit the doorbell write immediately */
wmb();
return err;
desc_err:
if (cq_host->ops->crypto_cfg_end) {
err = cq_host->ops->crypto_cfg_end(mmc, mrq);
if (err) {
pr_err("%s: failed to end ice config: err %d tag %d\n",
mmc_hostname(mmc), err, tag);
}
}
if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
cq_host->ops->crypto_cfg_reset)
cq_host->ops->crypto_cfg_reset(mmc, tag);
ice_err:
if (err)
cmdq_runtime_pm_put(cq_host);
out:
@ -873,7 +803,6 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
struct mmc_request *mrq;
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
int offset = 0;
int err = 0;
if (cq_host->offset_changed)
offset = CQ_V5_VENDOR_CFG;
@ -888,18 +817,6 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
cmdq_runtime_pm_put(cq_host);
if (!(mrq->cmdq_req->cmdq_req_flags & DCMD)) {
if (cq_host->ops->crypto_cfg_end) {
err = cq_host->ops->crypto_cfg_end(mmc, mrq);
if (err) {
pr_err("%s: failed to end ice config: err %d tag %d\n",
mmc_hostname(mmc), err, tag);
}
}
}
if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
cq_host->ops->crypto_cfg_reset)
cq_host->ops->crypto_cfg_reset(mmc, tag);
mrq->done(mrq);
}
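
The deleted cmdq crypto path widened each task descriptor from 64 to 128 bits: the lower 8 bytes (CQ_TASK_DESC_TASK_PARAMS_SIZE) keep the usual task parameters and the upper 8 carry the little-endian ICE context, as the removed cmdq_prep_crypto_desc() above shows. A self-contained sketch of that layout (the values are illustrative; the kernel used cpu_to_le64(), so a little-endian host is assumed here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CQ_TASK_DESC_TASK_PARAMS_SIZE 8

int main(void)
{
    uint8_t desc[16] = { 0 };                 /* 128-bit task descriptor */
    uint64_t task_params = 0x1122334455667788ULL;
    uint64_t ice_ctx = 0xaabbccddeeff0011ULL; /* illustrative context    */

    memcpy(desc, &task_params, sizeof(task_params));
    memcpy(desc + CQ_TASK_DESC_TASK_PARAMS_SIZE, &ice_ctx, sizeof(ice_ctx));

    printf("first ICE-context byte at offset 8: 0x%02x\n", desc[8]);
    return 0;
}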

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -18,13 +18,11 @@
#define CQVER 0x00
/* capabilities */
#define CQCAP 0x04
#define CQCAP_CS (1 << 28)
/* configuration */
#define CQCFG 0x08
#define CQ_DCMD 0x00001000
#define CQ_TASK_DESC_SZ 0x00000100
#define CQ_ENABLE 0x00000001
#define CQ_ICE_ENABLE 0x00000002
/* control */
#define CQCTL 0x0C
@ -153,9 +151,6 @@
#define CQ_VENDOR_CFG 0x100
#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
#define CQ_TASK_DESC_TASK_PARAMS_SIZE 8
#define CQ_TASK_DESC_ICE_PARAMS_SIZE 8
struct task_history {
u64 task;
bool is_dcmd;
@ -173,7 +168,6 @@ struct cmdq_host {
u32 dcmd_slot;
u32 caps;
#define CMDQ_TASK_DESC_SZ_128 0x1
#define CMDQ_CAP_CRYPTO_SUPPORT 0x2
u32 quirks;
#define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1
@ -222,10 +216,6 @@ struct cmdq_host_ops {
void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
int (*reset)(struct mmc_host *mmc);
void (*post_cqe_halt)(struct mmc_host *mmc);
int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq,
u32 slot, u64 *ice_ctx);
int (*crypto_cfg_end)(struct mmc_host *mmc, struct mmc_request *mrq);
void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot);
};
static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg)

View File

@ -1,587 +0,0 @@
/*
* Copyright (c) 2015, 2017-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "sdhci-msm-ice.h"
static void sdhci_msm_ice_error_cb(void *host_ctrl, u32 error)
{
struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
dev_err(&msm_host->pdev->dev, "%s: Error in ice operation 0x%x",
__func__, error);
if (msm_host->ice.state == SDHCI_MSM_ICE_STATE_ACTIVE)
msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
}
static struct platform_device *sdhci_msm_ice_get_pdevice(struct device *dev)
{
struct device_node *node;
struct platform_device *ice_pdev = NULL;
node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0);
if (!node) {
dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n",
__func__);
goto out;
}
ice_pdev = qcom_ice_get_pdevice(node);
out:
return ice_pdev;
}
static
struct qcom_ice_variant_ops *sdhci_msm_ice_get_vops(struct device *dev)
{
struct qcom_ice_variant_ops *ice_vops = NULL;
struct device_node *node;
node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0);
if (!node) {
dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n",
__func__);
goto out;
}
ice_vops = qcom_ice_get_variant_ops(node);
of_node_put(node);
out:
return ice_vops;
}
static
void sdhci_msm_enable_ice_hci(struct sdhci_host *host, bool enable)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
u32 config = 0;
u32 ice_cap = 0;
/*
* Enable the cryptographic support inside SDHC.
* This is a global config which needs to be enabled
* all the time.
* Only when it is enabled does the ICE_HCI capability
* get reflected in the CQCAP register.
*/
config = readl_relaxed(host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
if (enable)
config &= ~DISABLE_CRYPTO;
else
config |= DISABLE_CRYPTO;
writel_relaxed(config, host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
/*
* The CQCAP register is in a different register space from the
* ICE global-enable register above, so an mb() is required to
* ensure the write above completes before the CQCAP register is read.
*/
mb();
/*
* Check if ICE HCI capability support is present
* If present, enable it.
*/
ice_cap = readl_relaxed(msm_host->cryptoio + ICE_CQ_CAPABILITIES);
if (ice_cap & ICE_HCI_SUPPORT) {
config = readl_relaxed(msm_host->cryptoio + ICE_CQ_CONFIG);
if (enable)
config |= CRYPTO_GENERAL_ENABLE;
else
config &= ~CRYPTO_GENERAL_ENABLE;
writel_relaxed(config, msm_host->cryptoio + ICE_CQ_CONFIG);
}
}
int sdhci_msm_ice_get_dev(struct sdhci_host *host)
{
struct device *sdhc_dev;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
if (!msm_host || !msm_host->pdev) {
pr_err("%s: invalid msm_host %p or msm_host->pdev\n",
__func__, msm_host);
return -EINVAL;
}
sdhc_dev = &msm_host->pdev->dev;
msm_host->ice.vops = sdhci_msm_ice_get_vops(sdhc_dev);
msm_host->ice.pdev = sdhci_msm_ice_get_pdevice(sdhc_dev);
if (msm_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) {
dev_err(sdhc_dev, "%s: ICE device not probed yet\n",
__func__);
msm_host->ice.pdev = NULL;
msm_host->ice.vops = NULL;
return -EPROBE_DEFER;
}
if (!msm_host->ice.pdev) {
dev_dbg(sdhc_dev, "%s: invalid platform device\n", __func__);
msm_host->ice.vops = NULL;
return -ENODEV;
}
if (!msm_host->ice.vops) {
dev_dbg(sdhc_dev, "%s: invalid ice vops\n", __func__);
msm_host->ice.pdev = NULL;
return -ENODEV;
}
msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
return 0;
}
static
int sdhci_msm_ice_pltfm_init(struct sdhci_msm_host *msm_host)
{
struct resource *ice_memres = NULL;
struct platform_device *pdev = msm_host->pdev;
int err = 0;
if (!msm_host->ice_hci_support)
goto out;
/*
* The ICE HCI registers live in the cmdq register space,
* so map the cmdq memory to access them.
*/
ice_memres = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "cmdq_mem");
if (!ice_memres) {
dev_err(&pdev->dev, "Failed to get iomem resource for ice\n");
err = -EINVAL;
goto out;
}
msm_host->cryptoio = devm_ioremap(&pdev->dev,
ice_memres->start,
resource_size(ice_memres));
if (!msm_host->cryptoio) {
dev_err(&pdev->dev, "Failed to remap registers\n");
err = -ENOMEM;
}
out:
return err;
}
int sdhci_msm_ice_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
if (msm_host->ice.vops->init) {
err = sdhci_msm_ice_pltfm_init(msm_host);
if (err)
goto out;
if (msm_host->ice_hci_support)
sdhci_msm_enable_ice_hci(host, true);
err = msm_host->ice.vops->init(msm_host->ice.pdev,
msm_host,
sdhci_msm_ice_error_cb);
if (err) {
pr_err("%s: ice init err %d\n",
mmc_hostname(host->mmc), err);
sdhci_msm_ice_print_regs(host);
if (msm_host->ice_hci_support)
sdhci_msm_enable_ice_hci(host, false);
goto out;
}
msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
}
out:
return err;
}
void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
{
writel_relaxed(SDHCI_MSM_ICE_ENABLE_BYPASS,
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
}
static
int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req,
unsigned int *bypass, short *key_index)
{
int err = 0;
struct ice_data_setting ice_set;
memset(&ice_set, 0, sizeof(struct ice_data_setting));
if (msm_host->ice.vops->config_start) {
err = msm_host->ice.vops->config_start(
msm_host->ice.pdev,
req, &ice_set, false);
if (err) {
pr_err("%s: ice config failed %d\n",
mmc_hostname(msm_host->mmc), err);
return err;
}
}
/* if writing data command */
if (rq_data_dir(req) == WRITE)
*bypass = ice_set.encr_bypass ?
SDHCI_MSM_ICE_ENABLE_BYPASS :
SDHCI_MSM_ICE_DISABLE_BYPASS;
/* if reading data command */
else if (rq_data_dir(req) == READ)
*bypass = ice_set.decr_bypass ?
SDHCI_MSM_ICE_ENABLE_BYPASS :
SDHCI_MSM_ICE_DISABLE_BYPASS;
*key_index = ice_set.crypto_data.key_index;
return err;
}
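The bypass decision above depends only on the request direction and on the two bypass flags returned by the vendor config_start hook. A minimal standalone sketch of that selection, with illustrative names mirroring the driver:
#include <stdbool.h>

enum { ICE_DISABLE_BYPASS = 0, ICE_ENABLE_BYPASS = 1 };

/* Mirrors the bypass flags of struct ice_data_setting (names illustrative). */
struct ice_bypass_setting {
	bool encr_bypass;	/* skip encryption on writes */
	bool decr_bypass;	/* skip decryption on reads */
};

/* Returns the bypass value that ends up in the per-slot CTRL_INFO word. */
static inline unsigned int ice_pick_bypass(const struct ice_bypass_setting *s,
					   bool is_write)
{
	if (is_write)
		return s->encr_bypass ? ICE_ENABLE_BYPASS : ICE_DISABLE_BYPASS;
	return s->decr_bypass ? ICE_ENABLE_BYPASS : ICE_DISABLE_BYPASS;
}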
static
void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba, u32 slot,
unsigned int bypass, short key_index, u32 cdu_sz)
{
unsigned int ctrl_info_val = 0;
/* Configure ICE index */
ctrl_info_val =
(key_index &
MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX)
<< OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX;
/* Configure data unit size of transfer request */
ctrl_info_val |=
(cdu_sz &
MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU)
<< OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU;
/* Configure ICE bypass mode */
ctrl_info_val |=
(bypass & MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS)
<< OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS;
writel_relaxed((lba & 0xFFFFFFFF),
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n + 16 * slot);
writel_relaxed(((lba >> 32) & 0xFFFFFFFF),
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot);
writel_relaxed(ctrl_info_val,
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
/* Ensure ICE registers are configured before issuing SDHCI request */
mb();
}
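sdhci-msm-ice.h (further below) places bypass at bit 0, the key index at bits 1..5 and the CDU at bits 6..8 of this word. A quick userspace check of the packing, assuming key slot 3, 4 KB data units and bypass disabled:
#include <stdio.h>

/* Offsets and masks copied from sdhci-msm-ice.h (pre-ICE3.0 layout). */
#define OFF_BYPASS	0
#define OFF_KEY_INDEX	1
#define OFF_CDU		6
#define MASK_BYPASS	0x1
#define MASK_KEY_INDEX	0x1F
#define MASK_CDU	0x7

int main(void)
{
	unsigned int key_index = 3;	/* key slot 3 */
	unsigned int cdu = 3;		/* SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB */
	unsigned int bypass = 0;	/* crypto enabled */
	unsigned int v = ((key_index & MASK_KEY_INDEX) << OFF_KEY_INDEX) |
			 ((cdu & MASK_CDU) << OFF_CDU) |
			 ((bypass & MASK_BYPASS) << OFF_BYPASS);

	printf("CTRL_INFO_3 = 0x%02x\n", v);	/* prints 0xc6 */
	return 0;
}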
static inline
void sdhci_msm_ice_hci_update_cmdq_cfg(u64 dun, unsigned int bypass,
short key_index, u64 *ice_ctx)
{
/*
* The naming convention changed between the ICE2.0 and ICE3.0
* register fields. The equivalent names for ICE3.0 vs ICE2.0
* are:
* Data Unit Number(DUN) == Logical Base address(LBA)
* Crypto Configuration index (CCI) == Key Index
* Crypto Enable (CE) == !BYPASS
*/
if (ice_ctx)
*ice_ctx = DATA_UNIT_NUM(dun) |
CRYPTO_CONFIG_INDEX(key_index) |
CRYPTO_ENABLE(!bypass);
}
static
void sdhci_msm_ice_hci_update_noncq_cfg(struct sdhci_host *host,
u64 dun, unsigned int bypass, short key_index)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
unsigned int crypto_params = 0;
/*
* The naming convention changed between the ICE2.0 and ICE3.0
* register fields. The equivalent names for ICE3.0 vs ICE2.0
* are:
* Data Unit Number(DUN) == Logical Base address(LBA)
* Crypto Configuration index (CCI) == Key Index
* Crypto Enable (CE) == !BYPASS
*/
/* Configure ICE bypass mode */
crypto_params |=
((!bypass) & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE)
<< OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE;
/* Configure Crypto Configure Index (CCI) */
crypto_params |= (key_index &
MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI)
<< OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI;
writel_relaxed((crypto_params & 0xFFFFFFFF),
msm_host->cryptoio + ICE_NONCQ_CRYPTO_PARAMS);
/* Update DUN */
writel_relaxed((dun & 0xFFFFFFFF),
msm_host->cryptoio + ICE_NONCQ_CRYPTO_DUN);
/* Ensure ICE registers are configured before issuing SDHCI request */
mb();
}
int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
u32 slot)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
short key_index = 0;
u64 dun = 0;
unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B;
struct request *req;
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
return -EINVAL;
}
WARN_ON(!mrq);
if (!mrq)
return -EINVAL;
req = mrq->req;
if (req && req->bio) {
#ifdef CONFIG_PFK
if (bio_dun(req->bio)) {
dun = bio_dun(req->bio);
cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB;
} else {
dun = req->__sector;
}
#else
dun = req->__sector;
#endif
err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
if (err)
return err;
pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
mmc_hostname(host->mmc),
(rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
slot, bypass, key_index);
}
if (msm_host->ice_hci_support) {
/* For ICE HCI / ICE3.0 */
sdhci_msm_ice_hci_update_noncq_cfg(host, dun, bypass,
key_index);
} else {
/* For ICE versions earlier than ICE3.0 */
sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index,
cdu_sz);
}
return 0;
}
int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
short key_index = 0;
u64 dun = 0;
unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
struct request *req;
u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B;
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
return -EINVAL;
}
WARN_ON(!mrq);
if (!mrq)
return -EINVAL;
req = mrq->req;
if (req && req->bio) {
#ifdef CONFIG_PFK
if (bio_dun(req->bio)) {
dun = bio_dun(req->bio);
cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB;
} else {
dun = req->__sector;
}
#else
dun = req->__sector;
#endif
err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
if (err)
return err;
pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
mmc_hostname(host->mmc),
(rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
slot, bypass, key_index);
}
if (msm_host->ice_hci_support) {
/* For ICE HCI / ICE3.0 */
sdhci_msm_ice_hci_update_cmdq_cfg(dun, bypass, key_index,
ice_ctx);
} else {
/* For ICE versions earlier than ICE3.0 */
sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index,
cdu_sz);
}
return 0;
}
int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
struct request *req;
if (!host->is_crypto_en)
return 0;
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
return -EINVAL;
}
req = mrq->req;
if (req) {
if (msm_host->ice.vops->config_end) {
err = msm_host->ice.vops->config_end(req);
if (err) {
pr_err("%s: ice config end failed %d\n",
mmc_hostname(host->mmc), err);
return err;
}
}
}
return 0;
}
int sdhci_msm_ice_reset(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state before reset %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
return -EINVAL;
}
if (msm_host->ice.vops->reset) {
err = msm_host->ice.vops->reset(msm_host->ice.pdev);
if (err) {
pr_err("%s: ice reset failed %d\n",
mmc_hostname(host->mmc), err);
sdhci_msm_ice_print_regs(host);
return err;
}
}
/* If ICE HCI support is present then re-enable it */
if (msm_host->ice_hci_support)
sdhci_msm_enable_ice_hci(host, true);
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state after reset %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
return -EINVAL;
}
return 0;
}
int sdhci_msm_ice_resume(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
if (msm_host->ice.state !=
SDHCI_MSM_ICE_STATE_SUSPENDED) {
pr_err("%s: ice is in invalid state before resume %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
return -EINVAL;
}
if (msm_host->ice.vops->resume) {
err = msm_host->ice.vops->resume(msm_host->ice.pdev);
if (err) {
pr_err("%s: ice resume failed %d\n",
mmc_hostname(host->mmc), err);
return err;
}
}
msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
return 0;
}
int sdhci_msm_ice_suspend(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
if (msm_host->ice.state !=
SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state before resume %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
return -EINVAL;
}
if (msm_host->ice.vops->suspend) {
err = msm_host->ice.vops->suspend(msm_host->ice.pdev);
if (err) {
pr_err("%s: ice suspend failed %d\n",
mmc_hostname(host->mmc), err);
return -EINVAL;
}
}
msm_host->ice.state = SDHCI_MSM_ICE_STATE_SUSPENDED;
return 0;
}
int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int stat = -EINVAL;
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
return -EINVAL;
}
if (msm_host->ice.vops->status) {
*ice_status = 0;
stat = msm_host->ice.vops->status(msm_host->ice.pdev);
if (stat < 0) {
pr_err("%s: ice get sts failed %d\n",
mmc_hostname(host->mmc), stat);
return -EINVAL;
}
*ice_status = stat;
}
return 0;
}
void sdhci_msm_ice_print_regs(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
if (msm_host->ice.vops->debug)
msm_host->ice.vops->debug(msm_host->ice.pdev);
}

View File

@ -1,173 +0,0 @@
/*
* Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __SDHCI_MSM_ICE_H__
#define __SDHCI_MSM_ICE_H__
#include <linux/io.h>
#include <linux/of.h>
#include <linux/blkdev.h>
#include <crypto/ice.h>
#include "sdhci-msm.h"
#define SDHC_MSM_CRYPTO_LABEL "sdhc-msm-crypto"
/* Timeout waiting for ICE initialization, which requires TZ access */
#define SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS 500
/*
* SDHCI host controller ICE registers. There are 32 instances
* [0..31] of each of these registers.
*/
#define NUM_SDHCI_MSM_ICE_CTRL_INFO_n_REGS 32
#define CORE_VENDOR_SPEC_ICE_CTRL 0x300
#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n 0x304
#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n 0x308
#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n 0x30C
/* ICE3.0 registers added in the cmdq register space */
#define ICE_CQ_CAPABILITIES 0x04
#define ICE_HCI_SUPPORT (1 << 28)
#define ICE_CQ_CONFIG 0x08
#define CRYPTO_GENERAL_ENABLE (1 << 1)
#define ICE_NONCQ_CRYPTO_PARAMS 0x70
#define ICE_NONCQ_CRYPTO_DUN 0x74
/* ICE3.0 registers added in the HC register space */
#define HC_VENDOR_SPECIFIC_FUNC4 0x260
#define DISABLE_CRYPTO (1 << 15)
#define HC_VENDOR_SPECIFIC_ICE_CTRL 0x800
#define ICE_SW_RST_EN (1 << 0)
/* SDHCI MSM ICE CTRL Info register offset */
enum {
OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0,
OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 1,
OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU = 6,
OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0,
OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE = 8,
};
/* SDHCI MSM ICE CTRL Info register masks */
enum {
MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0x1,
MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 0x1F,
MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU = 0x7,
MASK_SDHCI_MSM_ICE_HCI_PARAM_CE = 0x1,
MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0xff
};
/* SDHCI MSM ICE encryption/decryption bypass state */
enum {
SDHCI_MSM_ICE_DISABLE_BYPASS = 0,
SDHCI_MSM_ICE_ENABLE_BYPASS = 1,
};
/* SDHCI MSM ICE Crypto Data Unit of target DUN of Transfer Request */
enum {
SDHCI_MSM_ICE_TR_DATA_UNIT_512_B = 0,
SDHCI_MSM_ICE_TR_DATA_UNIT_1_KB = 1,
SDHCI_MSM_ICE_TR_DATA_UNIT_2_KB = 2,
SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB = 3,
SDHCI_MSM_ICE_TR_DATA_UNIT_8_KB = 4,
SDHCI_MSM_ICE_TR_DATA_UNIT_16_KB = 5,
SDHCI_MSM_ICE_TR_DATA_UNIT_32_KB = 6,
SDHCI_MSM_ICE_TR_DATA_UNIT_64_KB = 7,
};
/* SDHCI MSM ICE internal state */
enum {
SDHCI_MSM_ICE_STATE_DISABLED = 0,
SDHCI_MSM_ICE_STATE_ACTIVE = 1,
SDHCI_MSM_ICE_STATE_SUSPENDED = 2,
};
/* crypto context fields in cmdq data command task descriptor */
#define DATA_UNIT_NUM(x) (((u64)(x) & 0xFFFFFFFF) << 0)
#define CRYPTO_CONFIG_INDEX(x) (((u64)(x) & 0xFF) << 32)
#define CRYPTO_ENABLE(x) (((u64)(x) & 0x1) << 47)
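A userspace sketch of assembling the 64-bit cmdq crypto context with the three macros above — the DUN sits in bits [31:0], the CCI in bits [39:32] and the CE bit at 47 (u64 replaced by uint64_t so it builds standalone):
#include <stdio.h>
#include <stdint.h>

#define DATA_UNIT_NUM(x)	(((uint64_t)(x) & 0xFFFFFFFF) << 0)
#define CRYPTO_CONFIG_INDEX(x)	(((uint64_t)(x) & 0xFF) << 32)
#define CRYPTO_ENABLE(x)	(((uint64_t)(x) & 0x1) << 47)

int main(void)
{
	/* DUN 0x1000, key slot 5, crypto enabled (bypass == 0) */
	uint64_t ice_ctx = DATA_UNIT_NUM(0x1000) |
			   CRYPTO_CONFIG_INDEX(5) |
			   CRYPTO_ENABLE(!0);

	printf("ice_ctx = 0x%016llx\n", (unsigned long long)ice_ctx);
	/* prints ice_ctx = 0x0000800500001000 */
	return 0;
}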
#ifdef CONFIG_MMC_SDHCI_MSM_ICE
int sdhci_msm_ice_get_dev(struct sdhci_host *host);
int sdhci_msm_ice_init(struct sdhci_host *host);
void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot);
int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
u32 slot);
int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq);
int sdhci_msm_ice_reset(struct sdhci_host *host);
int sdhci_msm_ice_resume(struct sdhci_host *host);
int sdhci_msm_ice_suspend(struct sdhci_host *host);
int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status);
void sdhci_msm_ice_print_regs(struct sdhci_host *host);
#else
inline int sdhci_msm_ice_get_dev(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
if (msm_host) {
msm_host->ice.pdev = NULL;
msm_host->ice.vops = NULL;
}
return -ENODEV;
}
inline int sdhci_msm_ice_init(struct sdhci_host *host)
{
return 0;
}
inline void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
{
}
inline int sdhci_msm_ice_cfg(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot)
{
return 0;
}
static inline int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
{
return 0;
}
static inline int sdhci_msm_ice_cfg_end(struct sdhci_host *host,
struct mmc_request *mrq)
{
return 0;
}
inline int sdhci_msm_ice_reset(struct sdhci_host *host)
{
return 0;
}
inline int sdhci_msm_ice_resume(struct sdhci_host *host)
{
return 0;
}
inline int sdhci_msm_ice_suspend(struct sdhci_host *host)
{
return 0;
}
inline int sdhci_msm_ice_get_status(struct sdhci_host *host,
int *ice_status)
{
return 0;
}
inline void sdhci_msm_ice_print_regs(struct sdhci_host *host)
{
}
#endif /* CONFIG_MMC_SDHCI_MSM_ICE */
#endif /* __SDHCI_MSM_ICE_H__ */

View File

@ -42,7 +42,6 @@
#include <trace/events/mmc.h>
#include "sdhci-msm.h"
#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"
#define QOS_REMOVE_DELAY_MS 10
@ -2055,26 +2054,20 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
}
}
if (msm_host->ice.pdev) {
if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
&ice_clk_table, &ice_clk_table_len, 0)) {
dev_err(dev, "failed parsing supported ice clock rates\n");
goto out;
}
if (!ice_clk_table || !ice_clk_table_len) {
dev_err(dev, "Invalid clock table\n");
goto out;
}
if (ice_clk_table_len != 2) {
dev_err(dev, "Need max and min frequencies in the table\n");
goto out;
}
pdata->sup_ice_clk_table = ice_clk_table;
pdata->sup_ice_clk_cnt = ice_clk_table_len;
pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
&ice_clk_table, &ice_clk_table_len, 0)) {
if (ice_clk_table && ice_clk_table_len) {
if (ice_clk_table_len != 2) {
dev_err(dev, "Need max and min frequencies\n");
goto out;
}
pdata->sup_ice_clk_table = ice_clk_table;
pdata->sup_ice_clk_cnt = ice_clk_table_len;
pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
dev_dbg(dev, "ICE clock rates (Hz): max: %u min: %u\n",
pdata->ice_clk_max, pdata->ice_clk_min);
}
}
pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
@ -3782,7 +3775,6 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
int i, index = 0;
u32 test_bus_val = 0;
u32 debug_reg[MAX_TEST_BUS] = {0};
u32 sts = 0;
sdhci_msm_cache_debug_data(host);
pr_info("----------- VENDOR REGISTER DUMP -----------\n");
@ -3855,28 +3847,10 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, i + 3, debug_reg[i], debug_reg[i+1],
debug_reg[i+2], debug_reg[i+3]);
if (host->is_crypto_en) {
sdhci_msm_ice_get_status(host, &sts);
pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
sdhci_msm_ice_print_regs(host);
}
}
static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
/* Set ICE core to be reset in sync with SDHC core */
if (msm_host->ice.pdev) {
if (msm_host->ice_hci_support)
writel_relaxed(1, host->ioaddr +
HC_VENDOR_SPECIFIC_ICE_CTRL);
else
writel_relaxed(1,
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
}
sdhci_reset(host, mask);
}
@ -4516,11 +4490,6 @@ static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
}
static struct sdhci_ops sdhci_msm_ops = {
.crypto_engine_cfg = sdhci_msm_ice_cfg,
.crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
.crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
.crypto_engine_reset = sdhci_msm_ice_reset,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.check_power_status = sdhci_msm_check_power_status,
.platform_execute_tuning = sdhci_msm_execute_tuning,
@ -4646,7 +4615,6 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
msm_host->caps_0 = caps;
if ((major == 1) && (minor >= 0x6b)) {
msm_host->ice_hci_support = true;
host->cdr_support = true;
}
@ -4750,31 +4718,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->mmc = host->mmc;
msm_host->pdev = pdev;
/* get the ice device vops if present */
ret = sdhci_msm_ice_get_dev(host);
if (ret == -EPROBE_DEFER) {
/*
* The SDHCI driver might be probed before the ICE driver is.
* In that case, return -EPROBE_DEFER so that the SDHCI probe
* is retried later.
*/
dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
__func__, ret);
goto pltfm_free;
} else if (ret == -ENODEV) {
/*
* ICE device is not enabled in DTS file. No need for further
* initialization of ICE driver.
*/
dev_warn(&pdev->dev, "%s: ICE device is not enabled",
__func__);
} else if (ret) {
dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
__func__, ret);
goto pltfm_free;
}
/* Extract platform data */
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
@ -4849,26 +4792,24 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
}
if (msm_host->ice.pdev) {
/* Setup SDC ICE clock */
msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
if (!IS_ERR(msm_host->ice_clk)) {
/* ICE core has only one clock frequency for now */
ret = clk_set_rate(msm_host->ice_clk,
msm_host->pdata->ice_clk_max);
if (ret) {
dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
ret,
msm_host->pdata->ice_clk_max);
goto bus_aggr_clk_disable;
}
ret = clk_prepare_enable(msm_host->ice_clk);
if (ret)
goto bus_aggr_clk_disable;
msm_host->ice_clk_rate =
msm_host->pdata->ice_clk_max;
/* Setup SDC ICE clock */
msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
if (!IS_ERR(msm_host->ice_clk)) {
/* ICE core has only one clock frequency for now */
ret = clk_set_rate(msm_host->ice_clk,
msm_host->pdata->ice_clk_max);
if (ret) {
dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
ret,
msm_host->pdata->ice_clk_max);
goto bus_aggr_clk_disable;
}
ret = clk_prepare_enable(msm_host->ice_clk);
if (ret)
goto bus_aggr_clk_disable;
msm_host->ice_clk_rate =
msm_host->pdata->ice_clk_max;
}
/* Setup SDC MMC clock */
@ -5117,22 +5058,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
/* Initialize ICE if present */
if (msm_host->ice.pdev) {
ret = sdhci_msm_ice_init(host);
if (ret) {
dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n",
mmc_hostname(host->mmc), ret);
ret = -EINVAL;
goto vreg_deinit;
}
host->is_crypto_en = true;
msm_host->mmc->inlinecrypt_support = true;
/* Packed commands cannot be encrypted/decrypted using ICE */
msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
MMC_CAP2_PACKED_WR_CONTROL);
}
init_completion(&msm_host->pwr_irq_completion);
if (gpio_is_valid(msm_host->pdata->status_gpio)) {
@ -5413,7 +5338,6 @@ static int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
ktime_t start = ktime_get();
int ret;
if (host->mmc->card && mmc_card_sdio(host->mmc->card))
goto defer_disable_host_irq;
@ -5433,12 +5357,6 @@ defer_disable_host_irq:
sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
}
if (host->is_crypto_en) {
ret = sdhci_msm_ice_suspend(host);
if (ret < 0)
pr_err("%s: failed to suspend crypto engine %d\n",
mmc_hostname(host->mmc), ret);
}
trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
ktime_to_us(ktime_sub(ktime_get(), start)));
return 0;
@ -5450,21 +5368,6 @@ static int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
ktime_t start = ktime_get();
int ret;
if (host->is_crypto_en) {
ret = sdhci_msm_enable_controller_clock(host);
if (ret) {
pr_err("%s: Failed to enable reqd clocks\n",
mmc_hostname(host->mmc));
goto skip_ice_resume;
}
ret = sdhci_msm_ice_resume(host);
if (ret)
pr_err("%s: failed to resume crypto engine %d\n",
mmc_hostname(host->mmc), ret);
}
skip_ice_resume:
if (host->mmc->card && mmc_card_sdio(host->mmc->card))
goto defer_enable_host_irq;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -171,12 +171,6 @@ struct sdhci_msm_bus_vote {
struct device_attribute max_bus_bw;
};
struct sdhci_msm_ice_data {
struct qcom_ice_variant_ops *vops;
struct platform_device *pdev;
int state;
};
struct sdhci_msm_regs_restore {
bool is_supported;
bool is_valid;
@ -221,8 +215,6 @@ struct sdhci_msm_debug_data {
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
void __iomem *cryptoio; /* ICE HCI mapped address */
bool ice_hci_support;
int pwr_irq; /* power irq */
struct clk *clk; /* main SD/MMC bus clock */
struct clk *pclk; /* SDHC peripheral bus clock */
@ -256,7 +248,6 @@ struct sdhci_msm_host {
bool enhanced_strobe;
bool rclk_delay_fix;
u32 caps_0;
struct sdhci_msm_ice_data ice;
u32 ice_clk_rate;
struct sdhci_msm_pm_qos_group *pm_qos;
int pm_qos_prev_cpu;

View File

@ -1834,50 +1834,6 @@ static int sdhci_get_tuning_cmd(struct sdhci_host *host)
return MMC_SEND_TUNING_BLOCK;
}
static int sdhci_crypto_cfg(struct sdhci_host *host, struct mmc_request *mrq,
u32 slot)
{
int err = 0;
if (host->mmc->inlinecrypt_reset_needed &&
host->ops->crypto_engine_reset) {
err = host->ops->crypto_engine_reset(host);
if (err) {
pr_err("%s: crypto reset failed\n",
mmc_hostname(host->mmc));
goto out;
}
host->mmc->inlinecrypt_reset_needed = false;
}
if (host->ops->crypto_engine_cfg) {
err = host->ops->crypto_engine_cfg(host, mrq, slot);
if (err) {
pr_err("%s: failed to configure crypto\n",
mmc_hostname(host->mmc));
goto out;
}
}
out:
return err;
}
static int sdhci_crypto_cfg_end(struct sdhci_host *host,
struct mmc_request *mrq)
{
int err = 0;
if (host->ops->crypto_engine_cfg_end) {
err = host->ops->crypto_engine_cfg_end(host, mrq);
if (err) {
pr_err("%s: failed to configure crypto\n",
mmc_hostname(host->mmc));
return err;
}
}
return 0;
}
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
@ -1944,13 +1900,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_get_tuning_cmd(host));
}
if (host->is_crypto_en) {
spin_unlock_irqrestore(&host->lock, flags);
if (sdhci_crypto_cfg(host, mrq, 0))
goto end_req;
spin_lock_irqsave(&host->lock, flags);
}
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
else
@ -1960,11 +1909,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
return;
end_req:
mrq->cmd->error = -EIO;
if (mrq->data)
mrq->data->error = -EIO;
mmc_request_done(host->mmc, mrq);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
@ -3009,7 +2953,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
sdhci_crypto_cfg_end(host, mrq);
mmc_request_done(host->mmc, mrq);
return false;
@ -4087,59 +4030,6 @@ static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
SDHCI_INT_RESPONSE, SDHCI_INT_ENABLE);
sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
}
static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
{
struct sdhci_host *host = mmc_priv(mmc);
int err = 0;
if (!host->is_crypto_en)
return 0;
if (mmc->inlinecrypt_reset_needed && host->ops->crypto_engine_reset) {
err = host->ops->crypto_engine_reset(host);
if (err) {
pr_err("%s: crypto reset failed\n",
mmc_hostname(host->mmc));
goto out;
}
mmc->inlinecrypt_reset_needed = false;
}
if (host->ops->crypto_engine_cmdq_cfg) {
err = host->ops->crypto_engine_cmdq_cfg(host, mrq,
slot, ice_ctx);
if (err) {
pr_err("%s: failed to configure crypto\n",
mmc_hostname(host->mmc));
goto out;
}
}
out:
return err;
}
static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc,
struct mmc_request *mrq)
{
struct sdhci_host *host = mmc_priv(mmc);
if (!host->is_crypto_en)
return 0;
return sdhci_crypto_cfg_end(host, mrq);
}
static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
{
struct sdhci_host *host = mmc_priv(mmc);
if (!host->is_crypto_en)
return;
if (host->ops->crypto_cfg_reset)
host->ops->crypto_cfg_reset(host, slot);
}
#else
static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
{
@ -4184,23 +4074,6 @@ static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
{
}
static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
{
return 0;
}
static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc,
struct mmc_request *mrq)
{
return 0;
}
static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
{
}
#endif
@ -4213,9 +4086,6 @@ static const struct cmdq_host_ops sdhci_cmdq_ops = {
.enhanced_strobe_mask = sdhci_enhanced_strobe_mask,
.post_cqe_halt = sdhci_cmdq_post_cqe_halt,
.set_transfer_params = sdhci_cmdq_set_transfer_params,
.crypto_cfg = sdhci_cmdq_crypto_cfg,
.crypto_cfg_end = sdhci_cmdq_crypto_cfg_end,
.crypto_cfg_reset = sdhci_cmdq_crypto_cfg_reset,
};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT

View File

@ -653,7 +653,6 @@ struct sdhci_host {
enum sdhci_power_policy power_policy;
bool sdio_irq_async_status;
bool is_crypto_en;
u32 auto_cmd_err_sts;
struct ratelimit_state dbg_dump_rs;
@ -695,14 +694,6 @@ struct sdhci_ops {
unsigned int (*get_ro)(struct sdhci_host *host);
void (*reset)(struct sdhci_host *host, u8 mask);
int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
int (*crypto_engine_cfg)(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot);
int (*crypto_engine_cmdq_cfg)(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
int (*crypto_engine_cfg_end)(struct sdhci_host *host,
struct mmc_request *mrq);
int (*crypto_engine_reset)(struct sdhci_host *host);
void (*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot);
void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);

View File

@ -2172,8 +2172,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
if (!shost->use_clustering)
q->limits.cluster = 0;
if (shost->inlinecrypt_support)
queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
* which is a common minimum for HBAs, and the minimum DMA alignment,

View File

@ -101,19 +101,6 @@ config SCSI_UFS_QCOM
Select this if you have UFS controller on QCOM chipset.
If unsure, say N.
config SCSI_UFS_QCOM_ICE
bool "QCOM specific hooks to Inline Crypto Engine for UFS driver"
depends on SCSI_UFS_QCOM && CRYPTO_DEV_QCOM_ICE
help
This selects the QCOM-specific additions to support the Inline
Crypto Engine (ICE).
ICE accelerates crypto operations while maintaining high UFS
performance.
Select this if your QCOM chipset supports ICE for UFS.
If unsure, say N.
config SCSI_UFS_TEST
tristate "Universal Flash Storage host controller driver unit-tests"
depends on SCSI_UFSHCD && IOSCHED_TEST

View File

@ -3,7 +3,6 @@
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFS_QCOM_ICE) += ufs-qcom-ice.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o

View File

@ -1,777 +0,0 @@
/*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <crypto/ice.h>
#include "ufshcd.h"
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"
#define UFS_QCOM_CRYPTO_LABEL "ufs-qcom-crypto"
/* Timeout waiting for ICE initialization, which requires TZ access */
#define UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS 500
#define UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN 0
static struct workqueue_struct *ice_workqueue;
static void ufs_qcom_ice_dump_regs(struct ufs_qcom_host *qcom_host, int offset,
int len, char *prefix)
{
print_hex_dump(KERN_ERR, prefix,
len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
16, 4, qcom_host->hba->mmio_base + offset, len * 4,
false);
}
void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
{
int i;
if (!(qcom_host->dbg_print_en & UFS_QCOM_DBG_PRINT_ICE_REGS_EN))
return;
ufs_qcom_ice_dump_regs(qcom_host, REG_UFS_QCOM_ICE_CFG, 1,
"REG_UFS_QCOM_ICE_CFG ");
for (i = 0; i < NUM_QCOM_ICE_CTRL_INFO_n_REGS; i++) {
pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_1_%d = 0x%08X\n", i,
ufshcd_readl(qcom_host->hba,
(REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * i)));
pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_2_%d = 0x%08X\n", i,
ufshcd_readl(qcom_host->hba,
(REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * i)));
}
if (qcom_host->ice.pdev && qcom_host->ice.vops &&
qcom_host->ice.vops->debug)
qcom_host->ice.vops->debug(qcom_host->ice.pdev);
}
static void ufs_qcom_ice_error_cb(void *host_ctrl, u32 error)
{
struct ufs_qcom_host *qcom_host = (struct ufs_qcom_host *)host_ctrl;
dev_err(qcom_host->hba->dev, "%s: Error in ice operation 0x%x",
__func__, error);
if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE)
qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED;
}
static struct platform_device *ufs_qcom_ice_get_pdevice(struct device *ufs_dev)
{
struct device_node *node;
struct platform_device *ice_pdev = NULL;
node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0);
if (!node) {
dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n",
__func__);
goto out;
}
ice_pdev = qcom_ice_get_pdevice(node);
out:
return ice_pdev;
}
static
struct qcom_ice_variant_ops *ufs_qcom_ice_get_vops(struct device *ufs_dev)
{
struct qcom_ice_variant_ops *ice_vops = NULL;
struct device_node *node;
node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0);
if (!node) {
dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n",
__func__);
goto out;
}
ice_vops = qcom_ice_get_variant_ops(node);
if (!ice_vops)
dev_err(ufs_dev, "%s: invalid ice_vops\n", __func__);
of_node_put(node);
out:
return ice_vops;
}
/**
* ufs_qcom_ice_get_dev() - sets pointers to ICE data structs in UFS QCom host
* @qcom_host: Pointer to a UFS QCom internal host structure.
*
* Sets ICE platform device pointer and ICE vops structure
* corresponding to the current UFS device.
*
* Return: -EINVAL in case of invalid input parameters:
* qcom_host, qcom_host->hba or qcom_host->hba->dev
* -ENODEV in case the ICE device is not required
* -EPROBE_DEFER in case ICE is required and hasn't been probed yet
* 0 otherwise
*/
int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
{
struct device *ufs_dev;
int err = 0;
if (!qcom_host || !qcom_host->hba || !qcom_host->hba->dev) {
pr_err("%s: invalid qcom_host %p or qcom_host->hba or qcom_host->hba->dev\n",
__func__, qcom_host);
err = -EINVAL;
goto out;
}
ufs_dev = qcom_host->hba->dev;
qcom_host->ice.vops = ufs_qcom_ice_get_vops(ufs_dev);
qcom_host->ice.pdev = ufs_qcom_ice_get_pdevice(ufs_dev);
if (qcom_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) {
dev_err(ufs_dev, "%s: ICE device not probed yet\n",
__func__);
qcom_host->ice.pdev = NULL;
qcom_host->ice.vops = NULL;
err = -EPROBE_DEFER;
goto out;
}
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_err(ufs_dev, "%s: invalid platform device %p or vops %p\n",
__func__, qcom_host->ice.pdev, qcom_host->ice.vops);
qcom_host->ice.pdev = NULL;
qcom_host->ice.vops = NULL;
err = -ENODEV;
goto out;
}
qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED;
out:
return err;
}
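A hedged sketch of how a probe path consumes the three return codes documented above; the wrapper name is illustrative, not part of the driver:
/*
 * Illustrative-only wrapper: defer when ICE is expected but not yet
 * probed, continue without crypto when the DT omits it, fail otherwise.
 */
static int example_setup_ice(struct ufs_qcom_host *qcom_host)
{
	int err = ufs_qcom_ice_get_dev(qcom_host);

	if (err == -EPROBE_DEFER)
		return err;	/* ICE described in DT but not probed yet */
	if (err == -ENODEV)
		return 0;	/* no ICE node: run without inline crypto */
	return err;		/* 0 on success; other errors are fatal */
}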
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
unsigned long flags;
struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work);
if (!qcom_host->ice.vops->config_start)
return;
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
if (!qcom_host->req_pending ||
ufshcd_is_shutdown_ongoing(qcom_host->hba)) {
qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
return;
}
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
/*
* config_start() is called again because the previous attempt
* returned -EAGAIN; this call now performs the necessary key setup.
*/
qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
qcom_host->req_pending, NULL, false);
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
qcom_host->req_pending = NULL;
qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
}
/**
* ufs_qcom_ice_init() - initializes the ICE-UFS interface and ICE device
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and qcom_host->hba->dev should all
* be valid pointers.
*
* Return: -EINVAL in case of an error
* 0 otherwise
*/
int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
{
struct device *ufs_dev = qcom_host->hba->dev;
int err;
err = qcom_host->ice.vops->init(qcom_host->ice.pdev,
qcom_host,
ufs_qcom_ice_error_cb);
if (err) {
dev_err(ufs_dev, "%s: ice init failed. err = %d\n",
__func__, err);
goto out;
} else {
qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE;
}
qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
if (!ice_workqueue) {
ice_workqueue = alloc_workqueue("ice-set-key",
WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0);
if (!ice_workqueue) {
dev_err(ufs_dev, "%s: workqueue allocation failed.\n",
__func__);
err = -ENOMEM;
goto out;
}
INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work);
}
out:
return err;
}
static inline bool ufs_qcom_is_data_cmd(char cmd_op, bool is_write)
{
if (is_write) {
if (cmd_op == WRITE_6 || cmd_op == WRITE_10 ||
cmd_op == WRITE_16)
return true;
} else {
if (cmd_op == READ_6 || cmd_op == READ_10 ||
cmd_op == READ_16)
return true;
}
return false;
}
int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
struct scsi_cmnd *cmd, u8 *cc_index, bool *enable)
{
struct ice_data_setting ice_set;
char cmd_op = cmd->cmnd[0];
int err;
unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n",
__func__);
return 0;
}
if (qcom_host->ice.vops->config_start) {
memset(&ice_set, 0, sizeof(ice_set));
spin_lock_irqsave(
&qcom_host->ice_work_lock, flags);
err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
cmd->request, &ice_set, true);
if (err) {
/*
* config_start() returns -EAGAIN when a key slot is
* available but still not configured. As configuration
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
* propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
if (!ice_workqueue) {
spin_unlock_irqrestore(
&qcom_host->ice_work_lock,
flags);
dev_err(qcom_host->hba->dev,
"%s: error %d workqueue NULL\n",
__func__, err);
return -EINVAL;
}
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
if (!queue_work(ice_workqueue,
&qcom_host->ice_cfg_work)) {
qcom_host->req_pending = NULL;
spin_unlock_irqrestore(
&qcom_host->ice_work_lock,
flags);
return err;
}
qcom_host->work_pending = true;
}
} else {
if (err != -EBUSY)
dev_err(qcom_host->hba->dev,
"%s: error in ice_vops->config %d\n",
__func__, err);
}
spin_unlock_irqrestore(&qcom_host->ice_work_lock,
flags);
return err;
}
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
if (ufs_qcom_is_data_cmd(cmd_op, true))
*enable = !ice_set.encr_bypass;
else if (ufs_qcom_is_data_cmd(cmd_op, false))
*enable = !ice_set.decr_bypass;
if (ice_set.crypto_data.key_index >= 0)
*cc_index = (u8)ice_set.crypto_data.key_index;
}
return 0;
}
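config_start() returns -EAGAIN from this atomic context when a key slot exists but still needs sleepable programming, so the code above parks the request and lets the worker retry. A condensed, illustrative kernel-style sketch of that pattern (generic names, not the driver's own):
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct ice_defer {
	spinlock_t lock;
	struct work_struct work;	/* calls config_start() again, sleepably */
	struct request *pending;	/* request parked for the worker */
	bool work_pending;
};

static int ice_config_or_defer(struct ice_defer *d, struct request *req,
			       struct workqueue_struct *wq,
			       int (*config_start)(struct request *req))
{
	unsigned long flags;
	int err = config_start(req);	/* atomic attempt */

	if (err != -EAGAIN)
		return err;		/* configured, or a hard error */

	spin_lock_irqsave(&d->lock, flags);
	if (!d->work_pending) {
		d->pending = req;
		if (queue_work(wq, &d->work))
			d->work_pending = true;
		else
			d->pending = NULL;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	return -EAGAIN;			/* propagate so the command is re-queued */
}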
/**
* ufs_qcom_ice_cfg_start() - starts configuring UFS's ICE registers
* for an ICE transaction
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and qcom_host->hba->dev should all
* be valid pointers.
* @cmd: Pointer to a valid scsi command. cmd->request should also be
* a valid pointer.
*
* Return: -EINVAL in case of an error
* 0 otherwise
*/
int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
struct scsi_cmnd *cmd)
{
struct device *dev = qcom_host->hba->dev;
int err = 0;
struct ice_data_setting ice_set;
unsigned int slot = 0;
sector_t lba = 0;
unsigned int ctrl_info_val = 0;
unsigned int bypass = 0;
struct request *req;
char cmd_op;
unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
goto out;
}
if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
dev_err(dev, "%s: ice state (%d) is not active\n",
__func__, qcom_host->ice.state);
return -EINVAL;
}
if (qcom_host->hw_ver.major >= 0x3) {
/*
* The ICE 3.0 crypto sequences changed: the CTRL_INFO
* register no longer exists and does not need to be
* configured; the configuration is done via the UTRD.
*/
return 0;
}
req = cmd->request;
if (req->bio)
lba = (req->bio->bi_iter.bi_sector) >>
UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
slot = req->tag;
if (slot < 0 || slot > qcom_host->hba->nutrs) {
dev_err(dev, "%s: slot (%d) is out of boundaries (0...%d)\n",
__func__, slot, qcom_host->hba->nutrs);
return -EINVAL;
}
memset(&ice_set, 0, sizeof(ice_set));
if (qcom_host->ice.vops->config_start) {
spin_lock_irqsave(
&qcom_host->ice_work_lock, flags);
err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
req, &ice_set, true);
if (err) {
/*
* config_start() returns -EAGAIN when a key slot is
* available but still not configured. As configuration
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
* propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
if (!ice_workqueue) {
spin_unlock_irqrestore(
&qcom_host->ice_work_lock,
flags);
dev_err(qcom_host->hba->dev,
"%s: error %d workqueue NULL\n",
__func__, err);
return -EINVAL;
}
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
if (!queue_work(ice_workqueue,
&qcom_host->ice_cfg_work)) {
qcom_host->req_pending = NULL;
spin_unlock_irqrestore(
&qcom_host->ice_work_lock,
flags);
return err;
}
qcom_host->work_pending = true;
}
} else {
if (err != -EBUSY)
dev_err(qcom_host->hba->dev,
"%s: error in ice_vops->config %d\n",
__func__, err);
}
spin_unlock_irqrestore(
&qcom_host->ice_work_lock, flags);
return err;
}
spin_unlock_irqrestore(
&qcom_host->ice_work_lock, flags);
}
cmd_op = cmd->cmnd[0];
#define UFS_QCOM_DIR_WRITE true
#define UFS_QCOM_DIR_READ false
/* for non-data commands, bypass shall be enabled */
if (!ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE) &&
!ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ))
bypass = UFS_QCOM_ICE_ENABLE_BYPASS;
/* if writing data command */
else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE))
bypass = ice_set.encr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
UFS_QCOM_ICE_DISABLE_BYPASS;
/* if reading data command */
else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ))
bypass = ice_set.decr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
UFS_QCOM_ICE_DISABLE_BYPASS;
/* Configure ICE index */
ctrl_info_val =
(ice_set.crypto_data.key_index &
MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX)
<< OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX;
/* Configure data unit size of transfer request */
ctrl_info_val |=
UFS_QCOM_ICE_TR_DATA_UNIT_4_KB
<< OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU;
/* Configure ICE bypass mode */
ctrl_info_val |=
(bypass & MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS)
<< OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS;
if (qcom_host->hw_ver.major == 0x1) {
ufshcd_writel(qcom_host->hba, lba,
(REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * slot));
ufshcd_writel(qcom_host->hba, ctrl_info_val,
(REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * slot));
}
if (qcom_host->hw_ver.major == 0x2) {
ufshcd_writel(qcom_host->hba, (lba & 0xFFFFFFFF),
(REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 16 * slot));
ufshcd_writel(qcom_host->hba, ((lba >> 32) & 0xFFFFFFFF),
(REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 16 * slot));
ufshcd_writel(qcom_host->hba, ctrl_info_val,
(REG_UFS_QCOM_ICE_CTRL_INFO_3_n + 16 * slot));
}
/*
* Ensure the UFS-ICE registers are configured before the
* next operation; otherwise the UFS Host Controller might
* raise errors.
*/
mb();
out:
return err;
}
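bi_iter.bi_sector counts 512-byte sectors and UFS_QCOM_ICE_TR_DATA_UNIT_4_KB equals 3, so the shift above converts a sector address into 4 KB crypto data units. A quick userspace check:
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sector = 4096;		/* 512-byte sectors, i.e. 2 MiB in */
	uint64_t lba = sector >> 3;	/* UFS_QCOM_ICE_TR_DATA_UNIT_4_KB */

	printf("sector %llu -> 4 KB DUN %llu\n",
	       (unsigned long long)sector, (unsigned long long)lba);
	/* prints: sector 4096 -> 4 KB DUN 512 */
	return 0;
}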
/**
* ufs_qcom_ice_cfg_end() - finishes configuring UFS's ICE registers
* for an ICE transaction
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and
* qcom_host->hba->dev should all
* be valid pointers.
* @cmd: Pointer to a valid scsi command. cmd->request should also be
* a valid pointer.
*
* Return: -EINVAL in case of an error
* 0 otherwise
*/
int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, struct request *req)
{
int err = 0;
struct device *dev = qcom_host->hba->dev;
if (qcom_host->ice.vops->config_end) {
err = qcom_host->ice.vops->config_end(req);
if (err) {
dev_err(dev, "%s: error in ice_vops->config_end %d\n",
__func__, err);
return err;
}
}
return 0;
}
/**
* ufs_qcom_ice_reset() - resets UFS-ICE interface and ICE device
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and qcom_host->hba->dev should all
* be valid pointers.
*
* Return: -EINVAL in case of an error
* 0 otherwise
*/
int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
{
struct device *dev = qcom_host->hba->dev;
int err = 0;
if (!qcom_host->ice.pdev) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
goto out;
}
if (!qcom_host->ice.vops) {
dev_err(dev, "%s: invalid ice_vops\n", __func__);
return -EINVAL;
}
if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE)
goto out;
if (qcom_host->ice.vops->reset) {
err = qcom_host->ice.vops->reset(qcom_host->ice.pdev);
if (err) {
dev_err(dev, "%s: ice_vops->reset failed. err %d\n",
__func__, err);
goto out;
}
}
if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
dev_err(qcom_host->hba->dev,
"%s: error. ice.state (%d) is not in active state\n",
__func__, qcom_host->ice.state);
err = -EINVAL;
}
out:
return err;
}
/**
* ufs_qcom_ice_resume() - resumes UFS-ICE interface and ICE device from power
* collapse
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and qcom_host->hba->dev should all
* be valid pointers.
*
* Return: -EINVAL in case of an error
* 0 otherwise
*/
int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
{
struct device *dev = qcom_host->hba->dev;
int err = 0;
if (!qcom_host->ice.pdev) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
goto out;
}
if (qcom_host->ice.state !=
UFS_QCOM_ICE_STATE_SUSPENDED) {
goto out;
}
if (!qcom_host->ice.vops) {
dev_err(dev, "%s: invalid ice_vops\n", __func__);
return -EINVAL;
}
if (qcom_host->ice.vops->resume) {
err = qcom_host->ice.vops->resume(qcom_host->ice.pdev);
if (err) {
dev_err(dev, "%s: ice_vops->resume failed. err %d\n",
__func__, err);
return err;
}
}
qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE;
out:
return err;
}
/**
* ufs_qcom_is_ice_busy() - lets the caller know whether ICE has an
* ongoing operation in workqueue context.
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host should be a valid pointer.
*
* Return: 1 if ICE is busy, 0 if it is free.
* -EINVAL in case of error.
*/
int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host)
{
if (!qcom_host) {
pr_err("%s: invalid qcom_host %pK", __func__, qcom_host);
return -EINVAL;
}
if (qcom_host->req_pending)
return 1;
else
return 0;
}
/**
* ufs_qcom_ice_suspend() - suspends UFS-ICE interface and ICE device
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and qcom_host->hba->dev should all
* be valid pointers.
*
* Return: -EINVAL in case of an error
* 0 otherwise
*/
int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
{
struct device *dev = qcom_host->hba->dev;
int err = 0;
if (!qcom_host->ice.pdev) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
goto out;
}
if (qcom_host->ice.vops->suspend) {
err = qcom_host->ice.vops->suspend(qcom_host->ice.pdev);
if (err) {
dev_err(qcom_host->hba->dev,
"%s: ice_vops->suspend failed. err %d\n",
__func__, err);
return -EINVAL;
}
}
if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE) {
qcom_host->ice.state = UFS_QCOM_ICE_STATE_SUSPENDED;
} else if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_DISABLED) {
dev_err(qcom_host->hba->dev,
"%s: ice state is invalid: disabled\n",
__func__);
err = -EINVAL;
}
out:
return err;
}
/**
* ufs_qcom_ice_get_status() - returns the status of an ICE transaction
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and qcom_host->hba->dev should all
* be valid pointers.
* @ice_status: Pointer to a valid output parameter.
* < 0 in case of ICE transaction failure.
* 0 otherwise.
*
* Return: -EINVAL in case of an error
* 0 otherwise
*/
int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status)
{
struct device *dev = NULL;
int err = 0;
int stat = -EINVAL;
*ice_status = 0;
dev = qcom_host->hba->dev;
if (!dev) {
err = -EINVAL;
goto out;
}
if (!qcom_host->ice.pdev) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
goto out;
}
if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
err = -EINVAL;
goto out;
}
if (!qcom_host->ice.vops) {
dev_err(dev, "%s: invalid ice_vops\n", __func__);
return -EINVAL;
}
if (qcom_host->ice.vops->status) {
stat = qcom_host->ice.vops->status(qcom_host->ice.pdev);
if (stat < 0) {
dev_err(dev, "%s: ice_vops->status failed. stat %d\n",
__func__, stat);
err = -EINVAL;
goto out;
}
*ice_status = stat;
}
out:
return err;
}

View File

@ -1,137 +0,0 @@
/*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UFS_QCOM_ICE_H_
#define _UFS_QCOM_ICE_H_
#include <scsi/scsi_cmnd.h>
#include "ufs-qcom.h"
/*
* UFS host controller ICE registers. There are 32 instances
* [0..31] of each of these registers.
*/
enum {
REG_UFS_QCOM_ICE_CFG = 0x2200,
REG_UFS_QCOM_ICE_CTRL_INFO_1_n = 0x2204,
REG_UFS_QCOM_ICE_CTRL_INFO_2_n = 0x2208,
REG_UFS_QCOM_ICE_CTRL_INFO_3_n = 0x220C,
};
#define NUM_QCOM_ICE_CTRL_INFO_n_REGS 32
/* UFS QCOM ICE CTRL Info register offset */
enum {
OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS = 0,
OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX = 0x1,
OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU = 0x6,
};
/* UFS QCOM ICE CTRL Info register masks */
enum {
MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS = 0x1,
MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX = 0x1F,
MASK_UFS_QCOM_ICE_CTRL_INFO_CDU = 0x8,
};
/* UFS QCOM ICE encryption/decryption bypass state */
enum {
UFS_QCOM_ICE_DISABLE_BYPASS = 0,
UFS_QCOM_ICE_ENABLE_BYPASS = 1,
};
/* UFS QCOM ICE Crypto Data Unit of target DUN of Transfer Request */
enum {
UFS_QCOM_ICE_TR_DATA_UNIT_512_B = 0,
UFS_QCOM_ICE_TR_DATA_UNIT_1_KB = 1,
UFS_QCOM_ICE_TR_DATA_UNIT_2_KB = 2,
UFS_QCOM_ICE_TR_DATA_UNIT_4_KB = 3,
UFS_QCOM_ICE_TR_DATA_UNIT_8_KB = 4,
UFS_QCOM_ICE_TR_DATA_UNIT_16_KB = 5,
UFS_QCOM_ICE_TR_DATA_UNIT_32_KB = 6,
};
/* UFS QCOM ICE internal state */
enum {
UFS_QCOM_ICE_STATE_DISABLED = 0,
UFS_QCOM_ICE_STATE_ACTIVE = 1,
UFS_QCOM_ICE_STATE_SUSPENDED = 2,
};
#ifdef CONFIG_SCSI_UFS_QCOM_ICE
int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
struct scsi_cmnd *cmd, u8 *cc_index, bool *enable);
int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
struct scsi_cmnd *cmd);
int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host,
struct request *req);
int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status);
void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host);
int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host);
#else
inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
{
if (qcom_host) {
qcom_host->ice.pdev = NULL;
qcom_host->ice.vops = NULL;
}
return -ENODEV;
}
inline int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
{
return 0;
}
inline int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
struct scsi_cmnd *cmd)
{
return 0;
}
inline int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host,
struct request *req)
{
return 0;
}
inline int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
{
return 0;
}
inline int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
{
return 0;
}
inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
{
return 0;
}
inline int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host,
int *ice_status)
{
return 0;
}
inline void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
{
return;
}
inline int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host)
{
return 0;
}
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
#endif /* UFS_QCOM_ICE_H_ */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019, Linux Foundation. All rights reserved.
* Copyright (c) 2013-2020, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -28,7 +28,6 @@
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"
#include "ufs_quirks.h"
@ -406,14 +405,6 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
* is initialized.
*/
err = ufs_qcom_enable_lane_clks(host);
if (!err && host->ice.pdev) {
err = ufs_qcom_ice_init(host);
if (err) {
dev_err(hba->dev, "%s: ICE init failed (%d)\n",
__func__, err);
err = -EINVAL;
}
}
break;
case POST_CHANGE:
@ -849,7 +840,6 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_config_vreg(hba->dev,
host->vccq_parent, false);
ufs_qcom_ice_suspend(host);
if (ufs_qcom_is_link_off(hba)) {
/* Assert PHY soft reset */
ufs_qcom_assert_reset(hba);
@ -889,13 +879,6 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (err)
goto out;
err = ufs_qcom_ice_resume(host);
if (err) {
dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
__func__, err);
goto out;
}
hba->is_sys_suspended = false;
out:
@ -935,119 +918,6 @@ out:
return ret;
}
#ifdef CONFIG_SCSI_UFS_QCOM_ICE
static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct request *req;
int ret;
if (lrbp->cmd && lrbp->cmd->request)
req = lrbp->cmd->request;
else
return 0;
/* Use request LBA or given dun as the DUN value */
if (req->bio) {
#ifdef CONFIG_PFK
if (bio_dun(req->bio)) {
/* a bio can be split, so use its DUN, which is already adjusted for the offset */
*dun = bio_dun(req->bio);
} else {
*dun = req->bio->bi_iter.bi_sector;
*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
}
#else
*dun = req->bio->bi_iter.bi_sector;
*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
#endif
}
ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
return ret;
}
static
int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
int err = 0;
if (!host->ice.pdev ||
!lrbp->cmd ||
(lrbp->command_type != UTP_CMD_TYPE_SCSI &&
lrbp->command_type != UTP_CMD_TYPE_UFS_STORAGE))
goto out;
err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
out:
return err;
}
static
int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, struct request *req)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err = 0;
if (!host->ice.pdev || (lrbp->command_type != UTP_CMD_TYPE_SCSI &&
lrbp->command_type != UTP_CMD_TYPE_UFS_STORAGE))
goto out;
err = ufs_qcom_ice_cfg_end(host, req);
out:
return err;
}
static
int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err = 0;
if (!host->ice.pdev)
goto out;
err = ufs_qcom_ice_reset(host);
out:
return err;
}
static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (!status)
return -EINVAL;
return ufs_qcom_ice_get_status(host, status);
}
static int ufs_qcom_crypto_get_pending_req_status(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err = 0;
if (!host->ice.pdev)
goto out;
err = ufs_qcom_is_ice_busy(host);
out:
return err;
}
#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
#define ufs_qcom_crypto_req_setup NULL
#define ufs_qcom_crytpo_engine_cfg_start NULL
#define ufs_qcom_crytpo_engine_cfg_end NULL
#define ufs_qcom_crytpo_engine_reset NULL
#define ufs_qcom_crypto_engine_get_status NULL
#define ufs_qcom_crypto_get_pending_req_status NULL
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
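/*
 * Note: defining these callbacks to NULL in the !CONFIG_SCSI_UFS_QCOM_ICE
 * case is safe because every call site in ufshcd.h checks the function
 * pointer before dispatching, e.g. (pattern from the wrappers removed
 * further below):
 *
 *	if (hba->var && hba->var->crypto_vops &&
 *	    hba->var->crypto_vops->crypto_engine_reset)
 *		return hba->var->crypto_vops->crypto_engine_reset(hba);
 *	return 0;
 */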
struct ufs_qcom_dev_params {
u32 pwm_rx_gear; /* pwm rx gear to work in */
u32 pwm_tx_gear; /* pwm tx gear to work in */
@ -1629,7 +1499,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err = 0;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@ -1648,14 +1517,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
err = ufs_qcom_ice_resume(host);
if (err)
goto out;
} else if (!on && (status == PRE_CHANGE)) {
err = ufs_qcom_ice_suspend(host);
if (err)
goto out;
/*
* If auto hibern8 is supported then the link will already
* be in hibern8 state and the ref clock can be gated.
@ -1674,8 +1536,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
}
}
out:
return err;
return 0;
}
#ifdef CONFIG_SMP /* CONFIG_SMP */
@ -2209,36 +2070,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
/* Make a two way bind between the qcom host and the hba */
host->hba = hba;
spin_lock_init(&host->ice_work_lock);
ufshcd_set_variant(hba, host);
err = ufs_qcom_ice_get_dev(host);
if (err == -EPROBE_DEFER) {
/*
* UFS driver might be probed before ICE driver does.
* In that case we would like to return EPROBE_DEFER code
* in order to delay its probing.
*/
dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
__func__, err);
goto out_variant_clear;
} else if (err == -ENODEV) {
/*
* ICE device is not enabled in DTS file. No need for further
* initialization of ICE driver.
*/
dev_warn(dev, "%s: ICE device is not enabled",
__func__);
} else if (err) {
dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
__func__, err);
goto out_variant_clear;
} else {
hba->host->inlinecrypt_support = 1;
}
host->generic_phy = devm_phy_get(dev, "ufsphy");
if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
@ -2812,7 +2646,6 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
usleep_range(1000, 1100);
ufs_qcom_phy_dbg_register_dump(phy);
usleep_range(1000, 1100);
ufs_qcom_ice_print_regs(host);
}
/**
@ -2843,15 +2676,6 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
#endif
};
static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
.crypto_req_setup = ufs_qcom_crypto_req_setup,
.crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start,
.crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
.crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
.crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
.crypto_get_req_status = ufs_qcom_crypto_get_pending_req_status,
};
static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
.req_start = ufs_qcom_pm_qos_req_start,
.req_end = ufs_qcom_pm_qos_req_end,
@ -2860,7 +2684,6 @@ static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
static struct ufs_hba_variant ufs_hba_qcom_variant = {
.name = "qcom",
.vops = &ufs_hba_qcom_vops,
.crypto_vops = &ufs_hba_crypto_variant_ops,
.pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
};


@ -1,4 +1,4 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -236,26 +236,6 @@ struct ufs_qcom_testbus {
u8 select_minor;
};
/**
* struct ufs_qcom_ice_data - ICE related information
* @vops: pointer to variant operations of ICE
* @async_done: completion for supporting ICE's driver asynchronous nature
* @pdev: pointer to the proper ICE platform device
* @state: UFS-ICE interface's internal state (see
* ufs-qcom-ice.h for possible internal states)
* @quirks: UFS-ICE interface related quirks
* @crypto_engine_err: crypto engine errors
*/
struct ufs_qcom_ice_data {
struct qcom_ice_variant_ops *vops;
struct platform_device *pdev;
int state;
u16 quirks;
bool crypto_engine_err;
};
#ifdef CONFIG_DEBUG_FS
struct qcom_debugfs_files {
struct dentry *debugfs_root;
@ -363,7 +343,6 @@ struct ufs_qcom_host {
bool disable_lpm;
bool is_lane_clks_enabled;
bool sec_cfg_updated;
struct ufs_qcom_ice_data ice;
void __iomem *dev_ref_clk_ctrl_mmio;
bool is_dev_ref_clk_enabled;
@ -378,8 +357,6 @@ struct ufs_qcom_host {
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
spinlock_t ice_work_lock;
struct work_struct ice_cfg_work;
struct request *req_pending;
struct ufs_vreg *vddp_ref_clk;
struct ufs_vreg *vccq_parent;


@ -1409,8 +1409,6 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
u32 val = CONTROLLER_ENABLE;
if (ufshcd_is_crypto_supported(hba))
val |= CRYPTO_GENERAL_ENABLE;
ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}
@ -3360,41 +3358,6 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
u8 cc_index = 0;
bool enable = false;
u64 dun = 0;
int ret;
/*
* Call vendor specific code to get crypto info for this request:
* enable, crypto config. index, DUN.
* If bypass is set, don't bother setting the other fields.
*/
ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
if (ret) {
if (ret != -EAGAIN) {
dev_err(hba->dev,
"%s: failed to setup crypto request (%d)\n",
__func__, ret);
}
return ret;
}
if (!enable)
goto out;
req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
out:
return 0;
}
/**
* ufshcd_prepare_req_desc_hdr() - Fills the requests header
* descriptor according to request
@ -3443,9 +3406,6 @@ static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
req_desc->prd_table_length = 0;
if (ufshcd_is_crypto_supported(hba))
return ufshcd_prepare_crypto_utrd(hba, lrbp);
return 0;
}
@ -3709,13 +3669,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_get_read_lock(hba, cmd->device->lun);
if (unlikely(err < 0)) {
if (err == -EPERM) {
if (!ufshcd_vops_crypto_engine_get_req_status(hba)) {
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
return 0;
} else {
return SCSI_MLQUEUE_HOST_BUSY;
}
return SCSI_MLQUEUE_HOST_BUSY;
}
if (err == -EAGAIN)
return SCSI_MLQUEUE_HOST_BUSY;
@ -3851,22 +3805,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
if (err) {
if (err != -EAGAIN)
dev_err(hba->dev,
"%s: failed to configure crypto engine %d\n",
__func__, err);
scsi_dma_unmap(lrbp->cmd);
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
goto out;
}
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
@ -3882,7 +3820,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
dev_err(hba->dev, "%s: failed sending command, %d\n",
__func__, err);
err = DID_ERROR;
@ -6452,8 +6389,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
*/
ufshcd_vops_pm_qos_req_end(hba, cmd->request,
false);
ufshcd_vops_crypto_engine_cfg_end(hba,
lrbp, cmd->request);
}
req = cmd->request;
@ -6536,8 +6471,6 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
*/
ufshcd_vops_pm_qos_req_end(hba, cmd->request,
true);
ufshcd_vops_crypto_engine_cfg_end(hba,
lrbp, cmd->request);
}
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
@ -7474,8 +7407,6 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
ufsdbg_error_inject_dispatcher(hba,
ERR_INJECT_INTR, intr_status, &intr_status);
ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (hba->errors || hba->ce_error)
retval |= ufshcd_check_errors(hba);
@ -7952,16 +7883,6 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
goto out;
}
if (!err) {
err = ufshcd_vops_crypto_engine_reset(hba);
if (err) {
dev_err(hba->dev,
"%s: failed to reset crypto engine %d\n",
__func__, err);
goto out;
}
}
out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);


@ -369,30 +369,6 @@ struct ufs_hba_variant_ops {
#endif
};
/**
* struct ufs_hba_crypto_variant_ops - variant specific crypto callbacks
* @crypto_req_setup: retrieve the necessary cryptographic arguments to set
* up a request's transfer descriptor.
* @crypto_engine_cfg_start: start configuring cryptographic engine
* according to tag
* parameter
* @crypto_engine_cfg_end: end configuring cryptographic engine
* according to tag parameter
* @crypto_engine_reset: perform reset to the cryptographic engine
* @crypto_engine_get_status: get errors status of the cryptographic engine
* @crypto_get_req_status: Check if crypto driver still holds request or not
*/
struct ufs_hba_crypto_variant_ops {
int (*crypto_req_setup)(struct ufs_hba *, struct ufshcd_lrb *lrbp,
u8 *cc_index, bool *enable, u64 *dun);
int (*crypto_engine_cfg_start)(struct ufs_hba *, unsigned int);
int (*crypto_engine_cfg_end)(struct ufs_hba *, struct ufshcd_lrb *,
struct request *);
int (*crypto_engine_reset)(struct ufs_hba *);
int (*crypto_engine_get_status)(struct ufs_hba *, u32 *);
int (*crypto_get_req_status)(struct ufs_hba *);
};
/**
* struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks
*/
@ -409,7 +385,6 @@ struct ufs_hba_variant {
struct device *dev;
const char *name;
struct ufs_hba_variant_ops *vops;
struct ufs_hba_crypto_variant_ops *crypto_vops;
struct ufs_hba_pm_qos_variant_ops *pm_qos_vops;
};
@ -1501,55 +1476,6 @@ static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba)
}
#endif
static inline int ufshcd_vops_crypto_req_setup(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
if (hba->var && hba->var->crypto_vops &&
hba->var->crypto_vops->crypto_req_setup)
return hba->var->crypto_vops->crypto_req_setup(hba, lrbp,
cc_index, enable, dun);
return 0;
}
static inline int ufshcd_vops_crypto_engine_cfg_start(struct ufs_hba *hba,
unsigned int task_tag)
{
if (hba->var && hba->var->crypto_vops &&
hba->var->crypto_vops->crypto_engine_cfg_start)
return hba->var->crypto_vops->crypto_engine_cfg_start
(hba, task_tag);
return 0;
}
static inline int ufshcd_vops_crypto_engine_cfg_end(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp,
struct request *req)
{
if (hba->var && hba->var->crypto_vops &&
hba->var->crypto_vops->crypto_engine_cfg_end)
return hba->var->crypto_vops->crypto_engine_cfg_end
(hba, lrbp, req);
return 0;
}
static inline int ufshcd_vops_crypto_engine_reset(struct ufs_hba *hba)
{
if (hba->var && hba->var->crypto_vops &&
hba->var->crypto_vops->crypto_engine_reset)
return hba->var->crypto_vops->crypto_engine_reset(hba);
return 0;
}
static inline int ufshcd_vops_crypto_engine_get_status(struct ufs_hba *hba,
u32 *status)
{
if (hba->var && hba->var->crypto_vops &&
hba->var->crypto_vops->crypto_engine_get_status)
return hba->var->crypto_vops->crypto_engine_get_status(hba,
status);
return 0;
}
static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba,
struct request *req)
{
@ -1565,13 +1491,4 @@ static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba,
hba->var->pm_qos_vops->req_end(hba, req, lock);
}
static inline int ufshcd_vops_crypto_engine_get_req_status(struct ufs_hba *hba)
{
if (hba->var && hba->var->crypto_vops &&
hba->var->crypto_vops->crypto_get_req_status)
return hba->var->crypto_vops->crypto_get_req_status(hba);
return 0;
}
#endif /* End of Header */


@ -1,15 +1,11 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
ccflags-y += -Ifs/ext4
ccflags-y += -Ifs/f2fs
fscrypto-y := crypto.o \
fname.o \
hkdf.o \
hooks.o \
keyring.o \
keysetup.o \
fscrypt_ice.o \
keysetup_v1.o \
policy.o


@ -33,14 +33,10 @@ void fscrypt_decrypt_bio(struct bio *bio)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
if (fscrypt_using_hardware_encryption(page->mapping->host)) {
SetPageUptodate(page);
} else {
int ret = fscrypt_decrypt_pagecache_blocks(page,
bv->bv_len, bv->bv_offset);
if (ret)
SetPageError(page);
}
int ret = fscrypt_decrypt_pagecache_blocks(page,
bv->bv_len, bv->bv_offset);
if (ret)
SetPageError(page);
}
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
@ -72,7 +68,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
}
bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_NOENCRYPT);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
if (WARN_ON(ret != blocksize)) {
/* should never happen! */


@ -1,190 +0,0 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "fscrypt_ice.h"
extern int fscrypt_get_mode_key_size(int mode);
int fscrypt_using_hardware_encryption(const struct inode *inode)
{
struct fscrypt_info *ci = inode->i_crypt_info;
return S_ISREG(inode->i_mode) && ci &&
(fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE);
}
EXPORT_SYMBOL(fscrypt_using_hardware_encryption);
size_t fscrypt_get_ice_encryption_key_size(const struct inode *inode)
{
struct fscrypt_info *ci = NULL;
if (inode)
ci = inode->i_crypt_info;
if (!ci)
return 0;
return fscrypt_get_mode_key_size(fscrypt_policy_contents_mode(&(ci->ci_policy))) / 2;
}
size_t fscrypt_get_ice_encryption_salt_size(const struct inode *inode)
{
struct fscrypt_info *ci = NULL;
if (inode)
ci = inode->i_crypt_info;
if (!ci)
return 0;
return fscrypt_get_mode_key_size(fscrypt_policy_contents_mode(&(ci->ci_policy))) / 2;
}
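/*
 * Worked example (illustrative): FSCRYPT_MODE_PRIVATE carries a 64-byte
 * raw key, so both size helpers above return 32 -- the first 32 bytes of
 * ci_raw_key are the AES-XTS key and the remaining 32 bytes are the salt
 * (see fscrypt_get_ice_encryption_salt() below).
 */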
/*
* Retrieves encryption key from the inode
*/
char *fscrypt_get_ice_encryption_key(const struct inode *inode)
{
struct fscrypt_info *ci = NULL;
if (!inode)
return NULL;
ci = inode->i_crypt_info;
if (!ci)
return NULL;
return &(ci->ci_raw_key[0]);
}
/*
* Retrieves encryption salt from the inode
*/
char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
{
struct fscrypt_info *ci = NULL;
int size = 0;
if (!inode)
return NULL;
ci = inode->i_crypt_info;
if (!ci)
return NULL;
size = fscrypt_get_ice_encryption_key_size(inode);
if (!size)
return NULL;
return &(ci->ci_raw_key[size]);
}
/*
* returns true if the cipher mode in inode is AES XTS
*/
int fscrypt_is_aes_xts_cipher(const struct inode *inode)
{
struct fscrypt_info *ci = inode->i_crypt_info;
if (!ci)
return 0;
return (fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE);
}
/*
* returns true if encryption info in both inodes is equal
*/
bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
const struct inode *inode2)
{
char *key1 = NULL;
char *key2 = NULL;
char *salt1 = NULL;
char *salt2 = NULL;
if (!inode1 || !inode2)
return false;
if (inode1 == inode2)
return true;
/* both do not belong to ice, so we don't care, they are equal
* for us
*/
if (!fscrypt_should_be_processed_by_ice(inode1) &&
!fscrypt_should_be_processed_by_ice(inode2))
return true;
/* one belongs to ice, the other does not -> not equal */
if (fscrypt_should_be_processed_by_ice(inode1) ^
fscrypt_should_be_processed_by_ice(inode2))
return false;
key1 = fscrypt_get_ice_encryption_key(inode1);
key2 = fscrypt_get_ice_encryption_key(inode2);
salt1 = fscrypt_get_ice_encryption_salt(inode1);
salt2 = fscrypt_get_ice_encryption_salt(inode2);
/* key and salt should not be null by this point */
if (!key1 || !key2 || !salt1 || !salt2 ||
(fscrypt_get_ice_encryption_key_size(inode1) !=
fscrypt_get_ice_encryption_key_size(inode2)) ||
(fscrypt_get_ice_encryption_salt_size(inode1) !=
fscrypt_get_ice_encryption_salt_size(inode2)))
return false;
if ((memcmp(key1, key2,
fscrypt_get_ice_encryption_key_size(inode1)) == 0) &&
(memcmp(salt1, salt2,
fscrypt_get_ice_encryption_salt_size(inode1)) == 0))
return true;
return false;
}
void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun)
{
if (fscrypt_should_be_processed_by_ice(inode))
bio->bi_iter.bi_dun = dun;
}
EXPORT_SYMBOL(fscrypt_set_ice_dun);
void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip)
{
#ifdef CONFIG_DM_DEFAULT_KEY
bio->bi_crypt_skip = bi_crypt_skip;
#endif
}
EXPORT_SYMBOL(fscrypt_set_ice_skip);
/*
* This function will be used for filesystem when deciding to merge bios.
* Basic assumption is, if inline_encryption is set, single bio has to
* guarantee consecutive LBAs as well as ino|pg->index.
*/
bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted,
int bi_crypt_skip)
{
if (!bio)
return true;
#ifdef CONFIG_DM_DEFAULT_KEY
if (bi_crypt_skip != bio->bi_crypt_skip)
return false;
#endif
/* if both of them are not encrypted, no further check is needed */
if (!bio_dun(bio) && !bio_encrypted)
return true;
/* ICE allows only consecutive iv_key stream. */
return bio_end_dun(bio) == dun;
}
EXPORT_SYMBOL(fscrypt_mergeable_bio);
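/*
 * Worked example (illustrative): a bio that already covers DUNs 4..7 has
 * bio_end_dun() == 8, so an encrypted page with dun == 8 may be merged
 * into it, while dun == 9 would break the consecutive IV stream and must
 * start a new bio.
 */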


@ -1,99 +0,0 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _FSCRYPT_ICE_H
#define _FSCRYPT_ICE_H
#include <linux/blkdev.h>
#include "fscrypt_private.h"
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
{
if (!inode->i_sb->s_cop)
return 0;
if (!IS_ENCRYPTED((struct inode *)inode))
return 0;
return fscrypt_using_hardware_encryption(inode);
}
static inline int fscrypt_is_ice_capable(const struct super_block *sb)
{
return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev));
}
int fscrypt_is_aes_xts_cipher(const struct inode *inode);
char *fscrypt_get_ice_encryption_key(const struct inode *inode);
char *fscrypt_get_ice_encryption_salt(const struct inode *inode);
bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
const struct inode *inode2);
size_t fscrypt_get_ice_encryption_key_size(const struct inode *inode);
size_t fscrypt_get_ice_encryption_salt_size(const struct inode *inode);
#else
static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
{
return 0;
}
static inline int fscrypt_is_ice_capable(const struct super_block *sb)
{
return 0;
}
static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode)
{
return NULL;
}
static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
{
return NULL;
}
static inline size_t fscrypt_get_ice_encryption_key_size(
const struct inode *inode)
{
return 0;
}
static inline size_t fscrypt_get_ice_encryption_salt_size(
const struct inode *inode)
{
return 0;
}
static inline int fscrypt_is_xts_cipher(const struct inode *inode)
{
return 0;
}
static inline bool fscrypt_is_ice_encryption_info_equal(
const struct inode *inode1,
const struct inode *inode2)
{
return 0;
}
static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode)
{
return 0;
}
#endif
#endif /* _FSCRYPT_ICE_H */


@ -13,7 +13,6 @@
#include <linux/fscrypt.h>
#include <crypto/hash.h>
#include <linux/pfk.h>
#define CONST_STRLEN(str) (sizeof(str) - 1)
@ -160,8 +159,10 @@ struct fscrypt_symlink_data {
* inode is evicted.
*/
struct fscrypt_info {
/* The actual crypto transform used for encryption and decryption */
u8 ci_data_mode;
u8 ci_filename_mode;
u8 ci_flags;
struct crypto_skcipher *ci_ctfm;
/* True if the key should be freed when this fscrypt_info is freed */
@ -219,10 +220,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
filenames_mode == FSCRYPT_MODE_AES_256_CTS)
return true;
if (contents_mode == FSCRYPT_MODE_PRIVATE &&
filenames_mode == FSCRYPT_MODE_AES_256_CTS)
return true;
if (contents_mode == FSCRYPT_MODE_ADIANTUM &&
filenames_mode == FSCRYPT_MODE_ADIANTUM)
return true;


@ -12,7 +12,6 @@
#include <linux/key.h>
#include "fscrypt_private.h"
#include "fscrypt_ice.h"
static struct fscrypt_mode available_modes[] = {
[FSCRYPT_MODE_AES_256_XTS] = {
@ -52,12 +51,6 @@ static struct fscrypt_mode available_modes[] = {
},
};
static int fscrypt_data_encryption_mode(struct inode *inode)
{
return fscrypt_should_be_processed_by_ice(inode) ?
FSCRYPT_MODE_PRIVATE : FSCRYPT_MODE_AES_256_XTS;
}
static struct fscrypt_mode *
select_encryption_mode(const union fscrypt_policy *policy,
const struct inode *inode)
@ -393,7 +386,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
/* Fake up a context for an unencrypted directory */
memset(&ctx, 0, sizeof(ctx));
ctx.version = FSCRYPT_CONTEXT_V1;
ctx.v1.contents_encryption_mode = fscrypt_data_encryption_mode(inode);
ctx.v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
ctx.v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
memset(ctx.v1.master_key_descriptor, 0x42,
FSCRYPT_KEY_DESCRIPTOR_SIZE);
@ -487,11 +480,6 @@ void fscrypt_put_encryption_info(struct inode *inode)
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);
int fscrypt_get_mode_key_size(int mode)
{
return available_modes[mode].keysize;
}
/**
* fscrypt_free_inode - free an inode's fscrypt data requiring RCU delay
*


@ -306,25 +306,10 @@ out:
int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key)
{
int err;
if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
return setup_v1_file_key_direct(ci, raw_master_key);
} else if(S_ISREG(ci->ci_inode->i_mode) &&
(fscrypt_policy_contents_mode(&(ci->ci_policy)) == FSCRYPT_MODE_PRIVATE)) {
/* Inline encryption: no key derivation required because IVs are
* assigned based on iv_sector.
*/
if (ci->ci_mode->keysize != FSCRYPT_MAX_KEY_SIZE) {
err = -EINVAL;
} else {
memcpy(ci->ci_raw_key, raw_master_key, ci->ci_mode->keysize);
err = 0;
}
}
else {
else
return setup_v1_file_key_derived(ci, raw_master_key);
}
return err;
}
int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci)


@ -37,8 +37,6 @@
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
#include <linux/fscrypt.h>
/*
* How many user pages to map in one call to get_user_pages(). This determines
@ -454,23 +452,6 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}
#ifdef CONFIG_PFK
static bool is_inode_filesystem_type(const struct inode *inode,
const char *fs_type)
{
if (!inode || !fs_type)
return false;
if (!inode->i_sb)
return false;
if (!inode->i_sb->s_type)
return false;
return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
}
#endif
/*
* In the AIO read case we speculatively dirty the pages before starting IO.
* During IO completion, any of these pages which happen to have been written
@ -493,17 +474,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
bio_set_pages_dirty(bio);
dio->bio_disk = bio->bi_disk;
#ifdef CONFIG_PFK
bio->bi_dio_inode = dio->inode;
/* iv sector for security/pfe/pfk_fscrypt.c and f2fs in fs/f2fs/f2fs.h */
#define PG_DUN_NEW(i,p) \
(((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p) & 0xffffffff))
if (is_inode_filesystem_type(dio->inode, "f2fs"))
fscrypt_set_ice_dun(dio->inode, bio, PG_DUN_NEW(dio->inode,
(sdio->logical_offset_in_bio >> PAGE_SHIFT)));
#endif
if (sdio->submit_io) {
sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
dio->bio_cookie = BLK_QC_T_NONE;
@ -515,18 +486,6 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
sdio->logical_offset_in_bio = 0;
}
struct inode *dio_bio_get_inode(struct bio *bio)
{
struct inode *inode = NULL;
if (bio == NULL)
return NULL;
#ifdef CONFIG_PFK
inode = bio->bi_dio_inode;
#endif
return inode;
}
/*
* Release any resources in case of a failure
*/


@ -108,16 +108,10 @@ config EXT4_ENCRYPTION
files
config EXT4_FS_ENCRYPTION
bool "Ext4 FS Encryption"
bool
default n
depends on EXT4_ENCRYPTION
config EXT4_FS_ICE_ENCRYPTION
bool "Ext4 Encryption with ICE support"
default n
depends on EXT4_FS_ENCRYPTION
depends on PFK
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS


@ -205,10 +205,7 @@ typedef struct ext4_io_end {
ssize_t size; /* size of the extent */
} ext4_io_end_t;
#define EXT4_IO_ENCRYPTED 1
struct ext4_io_submit {
unsigned int io_flags;
struct writeback_control *io_wbc;
struct bio *io_bio;
ext4_io_end_t *io_end;


@ -1234,12 +1234,10 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
decrypt = IS_ENCRYPTED(inode) &&
S_ISREG(inode->i_mode) &&
!fscrypt_using_hardware_encryption(inode);
ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0),
1, &bh);
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = IS_ENCRYPTED(inode) &&
S_ISREG(inode->i_mode);
}
}
/*
@ -3744,14 +3742,9 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
#if defined(CONFIG_EXT4_FS_ENCRYPTION)
WARN_ON(IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)
&& !fscrypt_using_hardware_encryption(inode));
#endif
ret = __blockdev_direct_IO(iocb, inode,
inode->i_sb->s_bdev, iter,
get_block_func,
ext4_end_io_dio, NULL, dio_flags);
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
get_block_func, ext4_end_io_dio, NULL,
dio_flags);
if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
@ -3863,9 +3856,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
int rw = iov_iter_rw(iter);
#if defined(CONFIG_FS_ENCRYPTION)
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)
&& !fscrypt_using_hardware_encryption(inode))
#ifdef CONFIG_FS_ENCRYPTION
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
if (fsverity_active(inode))
@ -4028,7 +4020,6 @@ static int __ext4_block_zero_page_range(handle_t *handle,
struct inode *inode = mapping->host;
struct buffer_head *bh;
struct page *page;
bool decrypt;
int err = 0;
page = find_or_create_page(mapping, from >> PAGE_SHIFT,
@ -4071,15 +4062,13 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (!buffer_uptodate(bh)) {
err = -EIO;
decrypt = S_ISREG(inode->i_mode) &&
IS_ENCRYPTED(inode) &&
!fscrypt_using_hardware_encryption(inode);
ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh);
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
if (decrypt) {
if (S_ISREG(inode->i_mode) &&
IS_ENCRYPTED(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);


@ -603,13 +603,10 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
return -EOPNOTSUPP;
}
if (!fscrypt_using_hardware_encryption(orig_inode) ||
!fscrypt_using_hardware_encryption(donor_inode)) {
if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
ext4_msg(orig_inode->i_sb, KERN_ERR,
"Online defrag not supported for encrypted files");
return -EOPNOTSUPP;
}
if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
ext4_msg(orig_inode->i_sb, KERN_ERR,
"Online defrag not supported for encrypted files");
return -EOPNOTSUPP;
}
/* Protect orig and donor inodes against a truncate */


@ -344,8 +344,6 @@ void ext4_io_submit(struct ext4_io_submit *io)
int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
REQ_SYNC : 0;
io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
if (io->io_flags & EXT4_IO_ENCRYPTED)
io_op_flags |= REQ_NOENCRYPT;
bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
submit_bio(io->io_bio);
}
@ -355,7 +353,6 @@ void ext4_io_submit(struct ext4_io_submit *io)
void ext4_io_submit_init(struct ext4_io_submit *io,
struct writeback_control *wbc)
{
io->io_flags = 0;
io->io_wbc = wbc;
io->io_bio = NULL;
io->io_end = NULL;
@ -483,24 +480,22 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
if (io->io_bio)
gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
retry_encrypt:
if (!fscrypt_using_hardware_encryption(inode)) {
bounce_page = fscrypt_encrypt_pagecache_blocks(page,
PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(bounce_page)) {
ret = PTR_ERR(bounce_page);
if (ret == -ENOMEM && (io->io_bio ||
wbc->sync_mode == WB_SYNC_ALL)) {
gfp_flags = GFP_NOFS;
if (io->io_bio)
ext4_io_submit(io);
else
gfp_flags |= __GFP_NOFAIL;
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry_encrypt;
}
bounce_page = NULL;
goto out;
bounce_page = fscrypt_encrypt_pagecache_blocks(page,
PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(bounce_page)) {
ret = PTR_ERR(bounce_page);
if (ret == -ENOMEM && (io->io_bio ||
wbc->sync_mode == WB_SYNC_ALL)) {
gfp_flags = GFP_NOFS;
if (io->io_bio)
ext4_io_submit(io);
else
gfp_flags |= __GFP_NOFAIL;
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry_encrypt;
}
bounce_page = NULL;
goto out;
}
}
@ -508,8 +503,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
do {
if (!buffer_async_write(bh))
continue;
if (bounce_page)
io->io_flags |= EXT4_IO_ENCRYPTED;
ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
if (ret) {
/*


@ -412,8 +412,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx;
bio_set_op_attrs(bio, REQ_OP_READ,
ctx ? REQ_NOENCRYPT : 0);
bio_set_op_attrs(bio, REQ_OP_READ, 0);
}
length = first_hole << blkbits;


@ -700,7 +700,6 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
struct inode *inode = fio->page->mapping->host;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
fio->is_por ? META_POR : (__is_meta_io(fio) ?
@ -713,15 +712,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
/* Allocate a new bio */
bio = __bio_alloc(fio, 1);
if (f2fs_may_encrypt_bio(inode, fio))
fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
fscrypt_set_ice_skip(bio, fio->encrypted_page ? 1 : 0);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
if (fio->io_wbc && !is_read_io(fio->op))
wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
@ -902,9 +896,6 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
struct inode *inode;
bool bio_encrypted;
int bi_crypt_skip;
u64 dun;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@ -914,26 +905,14 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
f2fs_trace_ios(fio, 0);
inode = fio->page->mapping->host;
dun = PG_DUN(inode, fio->page);
bi_crypt_skip = fio->encrypted_page ? 1 : 0;
bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
/* ICE support */
if (bio && !fscrypt_mergeable_bio(bio, dun,
bio_encrypted, bi_crypt_skip)) {
f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
}
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_PAGES);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
if (bio_encrypted)
fscrypt_set_ice_dun(inode, bio, dun);
fscrypt_set_ice_skip(bio, bi_crypt_skip);
add_bio_entry(fio->sbi, bio, page, fio->temp);
} else {
if (add_ipu_page(fio->sbi, &bio, page))
@ -957,10 +936,6 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
struct inode *inode;
bool bio_encrypted;
int bi_crypt_skip;
u64 dun;
f2fs_bug_on(sbi, is_read_io(fio->op));
@ -987,12 +962,6 @@ next:
else
bio_page = fio->page;
inode = fio->page->mapping->host;
dun = PG_DUN(inode, fio->page);
bi_crypt_skip = fio->encrypted_page ? 1 : 0;
bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
/* set submitted = true as a return value */
fio->submitted = true;
@ -1001,11 +970,6 @@ next:
if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio,
io->last_block_in_bio, fio->new_blkaddr))
__submit_merged_bio(io);
/* ICE support */
if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted, bi_crypt_skip))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
if (F2FS_IO_ALIGNED(sbi) &&
@ -1016,9 +980,6 @@ alloc_new:
goto skip;
}
io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
if (bio_encrypted)
fscrypt_set_ice_dun(inode, io->bio, dun);
fscrypt_set_ice_skip(io->bio, bi_crypt_skip);
io->fio = *fio;
}
@ -1065,13 +1026,9 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
return ERR_PTR(-ENOMEM);
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ,
(IS_ENCRYPTED(inode) ?
REQ_NOENCRYPT :
op_flag));
bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
if (f2fs_encrypted_file(inode) &&
!fscrypt_using_hardware_encryption(inode))
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (f2fs_compressed_file(inode))
post_read_steps |= 1 << STEP_DECOMPRESS;
@ -1108,9 +1065,6 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
if (IS_ERR(bio))
return PTR_ERR(bio);
if (f2fs_may_encrypt_bio(inode, NULL))
fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
@ -2037,8 +1991,6 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
sector_t last_block_in_file;
sector_t block_nr;
int ret = 0;
bool bio_encrypted;
u64 dun;
block_in_file = (sector_t)page_index(page);
last_block = block_in_file + nr_pages;
@ -2109,13 +2061,6 @@ submit_and_realloc:
bio = NULL;
}
dun = PG_DUN(inode, page);
bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted, 0)) {
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
is_readahead ? REQ_RAHEAD : 0, page->index,
@ -2125,10 +2070,7 @@ submit_and_realloc:
bio = NULL;
goto out;
}
if (bio_encrypted)
fscrypt_set_ice_dun(inode, bio, dun);
}
/*
* If the page is under writeback, we need to wait for
* its completion to see the correct decrypted data.
@ -2465,9 +2407,6 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
retry_encrypt:
if (fscrypt_using_hardware_encryption(inode))
return 0;
fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {


@ -4035,8 +4035,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int rw = iov_iter_rw(iter);
if (f2fs_encrypted_file(inode) &&
!fscrypt_using_hardware_encryption(inode))
if (f2fs_encrypted_file(inode))
return true;
if (f2fs_is_multi_device(sbi))
return true;
@ -4061,16 +4060,6 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return false;
}
static inline bool f2fs_may_encrypt_bio(struct inode *inode,
struct f2fs_io_info *fio)
{
if (fio && (fio->type != DATA || fio->encrypted_page))
return false;
return (f2fs_encrypted_file(inode) &&
fscrypt_using_hardware_encryption(inode));
}
#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
unsigned int type);


@ -3039,11 +3039,6 @@ int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry,
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, want_excl);
if (error)
return error;
error = security_inode_post_create(dir, dentry, mode);
if (error)
return error;
if (!error)
fsnotify_create(dir, dentry);
return error;
@ -3876,11 +3871,6 @@ int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (error)
return error;
error = security_inode_post_create(dir, dentry, mode);
if (error)
return error;
if (!error)
fsnotify_create(dir, dentry);
return error;


@ -69,9 +69,6 @@
((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
#define bio_dun(bio) ((bio)->bi_iter.bi_dun)
#define bio_duns(bio) (bio_sectors(bio) >> 3) /* 4KB unit */
#define bio_end_dun(bio) (bio_dun(bio) + bio_duns(bio))
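/*
 * Example (illustrative): a bio covering 16 KB has bio_sectors() == 32,
 * so bio_duns() == 32 >> 3 == 4 crypto data units and bio_end_dun() ==
 * bio_dun() + 4.
 */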
/*
* Return the data direction, READ or WRITE.
@ -181,11 +178,6 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
{
iter->bi_sector += bytes >> 9;
#ifdef CONFIG_PFK
if (iter->bi_dun)
iter->bi_dun += bytes >> 12;
#endif
if (bio_no_advance_iter(bio)) {
iter->bi_size -= bytes;
iter->bi_done += bytes;


@ -100,13 +100,6 @@ struct bio {
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
};
#ifdef CONFIG_PFK
/* Encryption key to use (NULL if none) */
const struct blk_encryption_key *bi_crypt_key;
#endif
#ifdef CONFIG_DM_DEFAULT_KEY
int bi_crypt_skip;
#endif
unsigned short bi_vcnt; /* how many bio_vec's */
@ -121,9 +114,7 @@ struct bio {
struct bio_vec *bi_io_vec; /* the actual vec list */
struct bio_set *bi_pool;
#ifdef CONFIG_PFK
struct inode *bi_dio_inode;
#endif
/*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
@ -248,13 +239,6 @@ enum req_flag_bits {
__REQ_URGENT, /* urgent request */
__REQ_NOWAIT, /* Don't wait if request will block */
/* Android specific flags */
__REQ_NOENCRYPT, /*
* ok to not encrypt (already encrypted at fs
* level)
*/
__REQ_NR_BITS, /* stops here */
};
@ -272,7 +256,6 @@ enum req_flag_bits {
#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOENCRYPT (1ULL << __REQ_NOENCRYPT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)


@ -154,7 +154,6 @@ struct request {
unsigned int __data_len; /* total data len */
int tag;
sector_t __sector; /* sector cursor */
u64 __dun; /* dun for UFS */
struct bio *bio;
struct bio *biotail;
@ -653,7 +652,6 @@ struct request_queue {
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
#define QUEUE_FLAG_INLINECRYPT 29 /* inline encryption support */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@ -753,8 +751,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_inlinecrypt(q) \
test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@ -1035,11 +1031,6 @@ static inline sector_t blk_rq_pos(const struct request *rq)
return rq->__sector;
}
static inline sector_t blk_rq_dun(const struct request *rq)
{
return rq->__dun;
}
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
return rq->__data_len;


@ -44,9 +44,6 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
#ifdef CONFIG_PFK
u64 bi_dun; /* DUN setting for bio */
#endif
};
/*


@ -3071,8 +3071,6 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
struct inode *dio_bio_get_inode(struct bio *bio);
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);


@ -20,10 +20,6 @@
#define FS_CRYPTO_BLOCK_SIZE 16
/* iv sector for security/pfe/pfk_fscrypt.c and f2fs */
#define PG_DUN(i, p) \
(((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff))
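/*
 * Example (illustrative): for inode number 0x12 and page index 0x34,
 * PG_DUN() yields (0x12ULL << 32) | 0x34 == 0x0000001200000034, i.e. the
 * IV stream is keyed by (inode number, page index).
 */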
struct fscrypt_info;
struct fscrypt_str {
@ -745,33 +741,6 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode,
return 0;
}
/* fscrypt_ice.c */
#ifdef CONFIG_PFK
extern int fscrypt_using_hardware_encryption(const struct inode *inode);
extern void fscrypt_set_ice_dun(const struct inode *inode,
struct bio *bio, u64 dun);
extern void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip);
extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted,
int bi_crypt_skip);
#else
static inline int fscrypt_using_hardware_encryption(const struct inode *inode)
{
return 0;
}
static inline void fscrypt_set_ice_dun(const struct inode *inode,
struct bio *bio, u64 dun){}
static inline void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip)
{}
static inline bool fscrypt_mergeable_bio(struct bio *bio,
u64 dun, bool bio_encrypted, int bi_crypt_skip)
{
return true;
}
#endif
/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */
static inline void fscrypt_finalize_bounce_page(struct page **pagep)
{


@ -1475,8 +1475,6 @@ union security_list_options {
size_t *len);
int (*inode_create)(struct inode *dir, struct dentry *dentry,
umode_t mode);
int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
umode_t mode);
int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@ -1790,7 +1788,6 @@ struct security_hook_heads {
struct list_head inode_free_security;
struct list_head inode_init_security;
struct list_head inode_create;
struct list_head inode_post_create;
struct list_head inode_link;
struct list_head inode_unlink;
struct list_head inode_symlink;


@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/blkdev.h>
struct mmc_data;
struct mmc_request;
@ -169,8 +170,8 @@ struct mmc_request {
void (*recovery_notifier)(struct mmc_request *);
struct mmc_host *host;
struct mmc_cmdq_req *cmdq_req;
struct request *req;
/* Allow other commands during this ongoing data transfer or busy wait */
bool cap_cmd_during_tfr;
ktime_t io_start;


@ -1,79 +0,0 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef PFK_H_
#define PFK_H_
#include <linux/bio.h>
struct ice_crypto_setting;
#ifdef CONFIG_PFK
/*
* Default key for inline encryption.
*
* For now only AES-256-XTS is supported, so this is a fixed length. But if
* ever needed, this should be made variable-length with a 'mode' and 'size'.
* (Remember to update pfk_allow_merge_bio() when doing so!)
*/
#define BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS 64
struct blk_encryption_key {
u8 raw[BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS];
};
int pfk_load_key_start(const struct bio *bio,
struct ice_crypto_setting *ice_setting,
bool *is_pfe, bool async);
int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
int pfk_remove_key(const unsigned char *key, size_t key_size);
int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size);
bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
void pfk_clear_on_reset(void);
#else
static inline int pfk_load_key_start(const struct bio *bio,
struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
{
return -ENODEV;
}
static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
{
return -ENODEV;
}
static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
{
return -ENODEV;
}
static inline bool pfk_allow_merge_bio(const struct bio *bio1,
const struct bio *bio2)
{
return true;
}
static inline int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
return -ENODEV;
}
static inline void pfk_clear_on_reset(void)
{}
#endif /* CONFIG_PFK */
#endif /* PFK_H */
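These hooks bracket the submission of hardware-encrypted I/O. A minimal usage sketch, assuming a storage-driver caller; submit_bio_with_pfk() is hypothetical and only mirrors the API contract declared above:

#include <crypto/ice.h>		/* struct ice_crypto_setting */
#include <linux/pfk.h>

/* Hypothetical helper -- illustrative only, not part of this tree */
static int submit_bio_with_pfk(struct bio *bio)
{
	struct ice_crypto_setting setting;
	bool is_pfe = false;
	int ret;

	/* Resolve and load the per-file key for this bio, if it has one */
	ret = pfk_load_key_start(bio, &setting, &is_pfe, false);
	if (ret && is_pfe)
		return ret;	/* per-file encrypted, but the key is not usable */

	/* ... program the ICE key slot from 'setting' and issue the I/O ... */

	/* Release the key reference once the request has been dispatched */
	return pfk_load_key_end(bio, &is_pfe);
}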


@ -31,7 +31,6 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
struct linux_binprm;
struct cred;
@ -271,8 +270,6 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, const char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
int security_inode_post_create(struct inode *dir, struct dentry *dentry,
umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@ -667,13 +664,6 @@ static inline int security_inode_create(struct inode *dir,
return 0;
}
static inline int security_inode_post_create(struct inode *dir,
struct dentry *dentry,
umode_t mode)
{
return 0;
}
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)


@ -666,9 +666,6 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
/* Inline encryption support? */
unsigned inlinecrypt_support:1;
unsigned use_blk_mq:1;
unsigned use_cmd_list:1;


@ -6,10 +6,6 @@ menu "Security options"
source security/keys/Kconfig
if ARCH_QCOM
source security/pfe/Kconfig
endif
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
default n


@ -10,7 +10,6 @@ subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
subdir-$(CONFIG_ARCH_QCOM) += pfe
# always enable default capabilities
obj-y += commoncap.o
@ -27,7 +26,6 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
obj-$(CONFIG_ARCH_QCOM) += pfe/
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity


@ -1,50 +0,0 @@
menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
depends on ARCH_QCOM
config PFT
bool "Per-File-Tagger driver"
depends on SECURITY
default n
help
This driver is used for tagging enterprise files.
It is part of the Per-File-Encryption (PFE) feature.
The driver tags files when they are created by a
registered application.
Tagged files are encrypted using the dm-req-crypt driver.
config PFK
bool "Per-File-Key driver"
depends on SECURITY
depends on SECURITY_SELINUX
default n
help
This driver is used for storing eCryptfs information
in the file node.
It is part of the eCryptfs hardware-enhanced solution
provided by Qualcomm Technologies, Inc.
The information is used when the file is later encrypted using
the ICE or dm crypto engine.
config PFK_WRAPPED_KEY_SUPPORTED
bool "Per-File-Key driver with wrapped key support"
depends on SECURITY
depends on SECURITY_SELINUX
depends on QSEECOM
depends on PFK
default n
help
Adds wrapped key support in PFK driver. Instead of setting
the key directly in ICE, it unwraps the key and sets the key
in ICE.
config PFK_VIRTUALIZED
bool "Per-File-Key driver virtualized version"
depends on SECURITY
depends on SECURITY_SELINUX
depends on QSEECOM
depends on PFK
depends on MSM_HAB
help
Makes the driver use the hypervisor back end for ICE HW
operation virtualization instead of calling directly into TZ.
endmenu


@ -1,15 +0,0 @@
#
# Makefile for the MSM specific security device drivers.
#
ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
ccflags-y += -Ifs/crypto
ccflags-y += -Idrivers/misc
obj-$(CONFIG_PFT) += pft.o
obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ext4.o pfk_f2fs.o
ifdef CONFIG_PFK_VIRTUALIZED
obj-$(CONFIG_PFK_VIRTUALIZED) += pfk_ice_virt.o
else
obj-$(CONFIG_PFK) += pfk_ice.o
endif


@ -1,570 +0,0 @@
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Per-File-Key (PFK).
*
* This driver is responsible for overall management of various
* Per File Encryption variants that work on top of or as part of different
* file systems.
*
* The driver has the following purposes:
* 1) Define priorities between PFEs if more than one is enabled
* 2) Extract key information from the inode
* 3) Load and manage various keys in the ICE HW engine
* 4) Be invoked from the various layers in FS/BLOCK/STORAGE DRIVER that
* need to make decisions on HW encryption management of the data
* Some examples:
* BLOCK LAYER: when it takes decision on whether 2 chunks can be united
* to one encryption / decryption request sent to the HW
*
* UFS DRIVER: when it need to configure ICE HW with a particular key slot
* to be used for encryption / decryption
*
* PFE variants can differ in the particular way of storing the cryptographic
* info inside the inode, the actions to be taken upon file operations, etc.,
* but the common properties are described above.
*
*/
/* Uncomment the line below to enable debug messages */
/* #define DEBUG 1 */
#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/bio.h>
#include <linux/security.h>
#include <crypto/algapi.h>
#include <crypto/ice.h>
#include <linux/pfk.h>
#include "pfk_kc.h"
#include "objsec.h"
#include "pfk_ice.h"
#include "pfk_ext4.h"
#include "pfk_f2fs.h"
#include "pfk_internal.h"
static bool pfk_ready;
/* might be replaced by a table when more than one cipher is supported */
#define PFK_SUPPORTED_KEY_SIZE 32
#define PFK_SUPPORTED_SALT_SIZE 32
/* Various PFE types and function tables to support each one of them */
enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE};
typedef int (*pfk_parse_inode_type)(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
bool *is_pfe);
typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
const struct inode *inode2);
static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
/* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode,
/* F2FS_CRYPT_PFE */ &pfk_f2fs_parse_inode,
};
static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
/* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio,
/* F2FS_CRYPT_PFE */ &pfk_f2fs_allow_merge_bio,
};
static void __exit pfk_exit(void)
{
pfk_ready = false;
pfk_ext4_deinit();
pfk_f2fs_deinit();
pfk_kc_deinit();
}
static int __init pfk_init(void)
{
int ret = 0;
ret = pfk_ext4_init();
if (ret != 0)
goto fail;
ret = pfk_f2fs_init();
if (ret != 0)
goto fail;
ret = pfk_kc_init(true);
if (ret != 0 && ret != -EAGAIN) {
pr_err("could init pfk key cache, error %d\n", ret);
pfk_ext4_deinit();
pfk_f2fs_deinit();
goto fail;
}
pfk_ready = true;
pr_info("Driver initialized successfully\n");
return 0;
fail:
pr_err("Failed to init driver\n");
return -ENODEV;
}
/*
* If more than one type is supported simultaneously, this function will also
* set the priority between them
*/
static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
{
if (!inode)
return INVALID_PFE;
if (pfk_is_ext4_type(inode))
return EXT4_CRYPT_PFE;
if (pfk_is_f2fs_type(inode))
return F2FS_CRYPT_PFE;
return INVALID_PFE;
}
/**
* inode_to_filename() - get the filename from inode pointer.
* @inode: inode pointer
*
* it is used for debug prints.
*
* Return: filename string or "unknown".
*/
char *inode_to_filename(const struct inode *inode)
{
struct dentry *dentry = NULL;
char *filename = NULL;
if (!inode)
return "NULL";
if (hlist_empty(&inode->i_dentry))
return "unknown";
dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
filename = dentry->d_iname;
return filename;
}
/**
* pfk_is_ready() - driver is initialized and ready.
*
* Return: true if the driver is ready.
*/
static inline bool pfk_is_ready(void)
{
return pfk_ready;
}
/**
* pfk_bio_get_inode() - get the inode from a bio.
* @bio: Pointer to BIO structure.
*
* Walk the bio struct links to get the inode.
* Note that in general a bio may consist of several pages from
* several files, but in our case we always assume that all pages come
* from the same file, since our logic ensures it. That is why we only
* walk through the first page to look for the inode.
*
* Return: pointer to the inode struct if successful, or NULL otherwise.
*
*/
static struct inode *pfk_bio_get_inode(const struct bio *bio)
{
struct address_space *mapping = NULL;
if (!bio)
return NULL;
if (!bio_has_data((struct bio *)bio))
return NULL;
if (!bio->bi_io_vec)
return NULL;
if (!bio->bi_io_vec->bv_page)
return NULL;
if (PageAnon(bio->bi_io_vec->bv_page)) {
struct inode *inode;
/* Using direct-io (O_DIRECT) without page cache */
inode = dio_bio_get_inode((struct bio *)bio);
pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
return inode;
}
mapping = page_mapping(bio->bi_io_vec->bv_page);
if (!mapping)
return NULL;
return mapping->host;
}
/**
* pfk_key_size_to_key_type() - translate key size to key size enum
* @key_size: key size in bytes
* @key_size_type: pointer to store the output enum (can be null)
*
* Return: 0 in case of success, error otherwise (i.e. unsupported key size)
*/
int pfk_key_size_to_key_type(size_t key_size,
enum ice_crpto_key_size *key_size_type)
{
/*
* Currently only a 32 byte (256 bit) key size is supported;
* in the future, a table with the supported key sizes might
* be introduced.
*/
if (key_size != PFK_SUPPORTED_KEY_SIZE) {
pr_err("not supported key size %zu\n", key_size);
return -EINVAL;
}
if (key_size_type)
*key_size_type = ICE_CRYPTO_KEY_SIZE_256;
return 0;
}
/*
* Checks whether the filesystem type recorded in the inode's superblock
* matches the given fs_type string.
*/
bool pfe_is_inode_filesystem_type(const struct inode *inode,
const char *fs_type)
{
if (!inode || !fs_type)
return false;
if (!inode->i_sb)
return false;
if (!inode->i_sb->s_type)
return false;
return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
}
/**
* pfk_get_key_for_bio() - get the encryption key to be used for a bio
*
* @bio: pointer to the BIO
* @key_info: pointer to the key information which will be filled in
* @algo_mode: optional pointer to the algorithm identifier which will be set
* @is_pfe: will be set to false if the BIO should be left unencrypted
* @data_unit: optional pointer to the data unit size which will be set
*
* Return: 0 if a key is being used, otherwise a -errno value
*/
static int pfk_get_key_for_bio(const struct bio *bio,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo_mode,
bool *is_pfe, unsigned int *data_unit)
{
const struct inode *inode;
enum pfe_type which_pfe;
const struct blk_encryption_key *key = NULL;
char *s_type = NULL;
inode = pfk_bio_get_inode(bio);
which_pfe = pfk_get_pfe_type(inode);
s_type = (char *)pfk_kc_get_storage_type();
/*
* Set the data unit (DUN) size based on storage type:
* 512 byte DUN - ext4 on emmc ("sdcc", when the bio carries no DUN)
* 4K DUN - ext4 on ufs, f2fs on ufs and f2fs on emmc
*/
if (data_unit && bio) {
if (!bio_dun(bio) && s_type && !memcmp(s_type, "sdcc", strlen("sdcc")))
*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
else
*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
}
if (which_pfe != INVALID_PFE) {
/* Encrypted file; override ->bi_crypt_key */
pr_debug("parsing inode %lu with PFE type %d\n",
inode->i_ino, which_pfe);
return (*(pfk_parse_inode_ftable[which_pfe]))
(bio, inode, key_info, algo_mode, is_pfe);
}
/*
* bio is not for an encrypted file. Use ->bi_crypt_key if it was set.
* Otherwise, don't encrypt/decrypt the bio.
*/
#ifdef CONFIG_DM_DEFAULT_KEY
/* guard against a NULL bio before dereferencing it */
if (bio)
key = bio->bi_crypt_key;
#endif
if (!key) {
*is_pfe = false;
return -EINVAL;
}
/* Note: the "salt" is really just the second half of the XTS key. */
BUILD_BUG_ON(sizeof(key->raw) !=
PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE);
key_info->key = &key->raw[0];
key_info->key_size = PFK_SUPPORTED_KEY_SIZE;
key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE];
key_info->salt_size = PFK_SUPPORTED_SALT_SIZE;
if (algo_mode)
*algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
return 0;
}
/**
* pfk_load_key_start() - loads the PFE encryption key into the ICE.
* Can also be invoked from a non-PFE context, in which case it is not
* relevant and the is_pfe flag is set to false.
*
* @bio: Pointer to the BIO structure
* @ice_setting: Pointer to the ice setting structure that will be filled
* with the ice configuration values, including the index to which the key
* was loaded
* @is_pfe: will be false if the inode is not relevant to PFE, in which case
* the bio should be treated as non-PFE by the block layer
* @async: if true, the key load may be performed asynchronously
*
* Returns the index where the key is stored in the encryption hw and
* additional information that will be used later to configure the
* encryption hw.
*
* Must be followed by pfk_load_key_end() when the key is no longer used by
* the ice.
*/
int pfk_load_key_start(const struct bio *bio,
struct ice_crypto_setting *ice_setting, bool *is_pfe,
bool async)
{
int ret = 0;
struct pfk_key_info key_info = {NULL, NULL, 0, 0};
enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
enum ice_crpto_key_size key_size_type = 0;
unsigned int data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
u32 key_index = 0;
if (!is_pfe) {
pr_err("is_pfe is NULL\n");
return -EINVAL;
}
/*
* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_is_ready())
return -ENODEV;
if (!ice_setting) {
pr_err("ice setting is NULL\n");
return -EINVAL;
}
ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe,
&data_unit);
if (ret != 0)
return ret;
ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
if (ret != 0)
return ret;
ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
key_info.salt, key_info.salt_size, &key_index, async,
data_unit);
if (ret) {
if (ret != -EBUSY && ret != -EAGAIN)
pr_err("start: could not load key into pfk key cache, error %d\n",
ret);
return ret;
}
ice_setting->key_size = key_size_type;
ice_setting->algo_mode = algo_mode;
/* hardcoded for now */
ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
ice_setting->key_index = key_index;
pr_debug("loaded key for file %s key_index %d\n",
inode_to_filename(pfk_bio_get_inode(bio)), key_index);
return 0;
}
/**
* pfk_load_key_end() - marks the PFE key as no longer used by the ICE.
* Can also be invoked from a non-PFE context, in which case it is not
* relevant and the is_pfe flag is set to false.
*
* @bio: Pointer to the BIO structure
* @is_pfe: Pointer to the is_pfe flag, which will be true if the function
* was invoked from a PFE context
*/
int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
{
int ret = 0;
struct pfk_key_info key_info = {NULL, NULL, 0, 0};
if (!is_pfe) {
pr_err("is_pfe is NULL\n");
return -EINVAL;
}
/*
* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_is_ready())
return -ENODEV;
ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe, NULL);
if (ret != 0)
return ret;
pfk_kc_load_key_end(key_info.key, key_info.key_size,
key_info.salt, key_info.salt_size);
pr_debug("finished using key for file %s\n",
inode_to_filename(pfk_bio_get_inode(bio)));
return 0;
}
/**
* pfk_allow_merge_bio() - Check if two BIOs can be merged.
* @bio1: Pointer to the first BIO structure.
* @bio2: Pointer to the second BIO structure.
*
* Prevents merging of BIOs from encrypted and non-encrypted
* files, or from files encrypted with different keys.
* Also prevents non-encrypted and encrypted data from the same file
* from being merged (an ecryptfs header, if stored inside the file,
* should be non-encrypted).
* This API is called by the file system / block layer.
*
* Return: true if the BIOs are allowed to be merged, false
* otherwise.
*/
bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
{
const struct blk_encryption_key *key1 = NULL;
const struct blk_encryption_key *key2 = NULL;
const struct inode *inode1;
const struct inode *inode2;
enum pfe_type which_pfe1;
enum pfe_type which_pfe2;
if (!pfk_is_ready())
return false;
if (!bio1 || !bio2)
return false;
if (bio1 == bio2)
return true;
#ifdef CONFIG_DM_DEFAULT_KEY
/* read the default keys only after the NULL checks above */
key1 = bio1->bi_crypt_key;
key2 = bio2->bi_crypt_key;
#endif
inode1 = pfk_bio_get_inode(bio1);
inode2 = pfk_bio_get_inode(bio2);
which_pfe1 = pfk_get_pfe_type(inode1);
which_pfe2 = pfk_get_pfe_type(inode2);
/*
* If one bio is for an encrypted file and the other is for a different
* type of encrypted file or for blocks that are not part of an
* encrypted file, do not merge.
*/
if (which_pfe1 != which_pfe2)
return false;
if (which_pfe1 != INVALID_PFE) {
/* Both bios are for the same type of encrypted file. */
return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
inode1, inode2);
}
/*
* Neither bio is for an encrypted file. Merge only if the default keys
* are the same (or both are NULL).
*/
return key1 == key2 ||
(key1 && key2 &&
!crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw)));
}
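/*
* Illustrative call site (a sketch, not taken from the original tree): a
* block layer merge path would consult this hook before coalescing two
* requests; the req/next names are hypothetical.
*
*	if (!pfk_allow_merge_bio(req->bio, next->bio))
*		return false;
*/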
int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
int ret = -EINVAL;
if (!key || !salt)
return ret;
ret = pfk_kc_remove_key_with_salt(key, key_size, salt, salt_size);
if (ret)
pr_err("Clear key error: ret value %d\n", ret);
return ret;
}
/**
* pfk_clear_on_reset() - flush the key table on storage core reset.
* During a core reset the key configuration is lost in the ICE. We need to
* flush the cache so that the keys will be reconfigured again for every
* subsequent transaction.
*/
void pfk_clear_on_reset(void)
{
if (!pfk_is_ready())
return;
pfk_kc_clear_on_reset();
}
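/*
* Illustrative hookup (a sketch; the handler name is hypothetical): a
* storage driver would call this once its controller reset path has
* invalidated the ICE key configuration.
*
* static void example_host_reset_handler(void)
* {
*	... reset the storage controller / ICE ...
*	pfk_clear_on_reset();
* }
*/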
module_init(pfk_init);
module_exit(pfk_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Per-File-Key driver");

View File

@ -1,212 +0,0 @@
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Per-File-Key (PFK) - EXT4
*
* This driver is used to work with the EXT4 crypt extension.
*
* The key information is stored in the inode by EXT4 when the file is first
* opened and will later be accessed by the Block Device Driver to actually
* load the key into the encryption hw.
*
* PFK exposes APIs for loading and removing keys from the encryption hw,
* and also an API to determine whether two adjacent blocks can be
* aggregated by the Block Layer into one request to the encryption hw.
*
*/
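/*
* Illustrative flow (a sketch of how the upper PFK layer drives this file;
* see pfk_get_key_for_bio() in pfk.c):
*
*	if (pfk_is_ext4_type(inode))
*		ret = pfk_ext4_parse_inode(bio, inode, &key_info,
*			&algo_mode, &is_pfe);
*/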
/* Uncomment the line below to enable debug messages */
/* #define DEBUG 1 */
#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include "fscrypt_ice.h"
#include "pfk_ext4.h"
//#include "ext4_ice.h"
static bool pfk_ext4_ready;
/*
* pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
*/
void pfk_ext4_deinit(void)
{
pfk_ext4_ready = false;
}
/*
* pfk_ext4_init() - Init function, should be invoked by the upper PFK layer
*/
int __init pfk_ext4_init(void)
{
pfk_ext4_ready = true;
pr_info("PFK EXT4 inited successfully\n");
return 0;
}
/**
* pfk_ext4_is_ready() - driver is initialized and ready.
*
* Return: true if the driver is ready.
*/
static inline bool pfk_ext4_is_ready(void)
{
return pfk_ext4_ready;
}
/*
* pfk_ext4_dump_inode() - dumps all interesting info about the inode to the
* log; the implementation is kept commented out below as a debugging aid.
*/
/*
* static void pfk_ext4_dump_inode(const struct inode* inode)
* {
* struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode);
*
* pr_debug("dumping inode with address 0x%p\n", inode);
* pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode));
* pr_debug("EXT4_INODE_ENCRYPT flag is %d\n",
* ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT));
* if (ci) {
* pr_debug("crypt_info address 0x%p\n", ci);
* pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode);
* } else {
* pr_debug("crypt_info is NULL\n");
* }
* }
*/
/**
* pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
* @inode: inode pointer
*/
bool pfk_is_ext4_type(const struct inode *inode)
{
if (!pfe_is_inode_filesystem_type(inode, "ext4"))
return false;
return fscrypt_should_be_processed_by_ice(inode);
}
/**
* pfk_ext4_parse_cipher() - parse cipher from inode to enum
* @inode: inode
* @algo: pointer to store the output enum (can be null)
*
* Return: 0 in case of success, error otherwise (i.e. unsupported cipher)
*/
static int pfk_ext4_parse_cipher(const struct inode *inode,
enum ice_cryto_algo_mode *algo)
{
/*
* Currently only the AES XTS algo is supported;
* in the future, a table with the supported ciphers
* might be introduced.
*/
if (!inode)
return -EINVAL;
if (!fscrypt_is_aes_xts_cipher(inode)) {
pr_err("ext4 alghoritm is not supported by pfk\n");
return -EINVAL;
}
if (algo)
*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
return 0;
}
int pfk_ext4_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
bool *is_pfe)
{
int ret = 0;
if (!is_pfe)
return -EINVAL;
/*
* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_ext4_is_ready())
return -ENODEV;
if (!inode)
return -EINVAL;
if (!key_info)
return -EINVAL;
key_info->key = fscrypt_get_ice_encryption_key(inode);
if (!key_info->key) {
pr_err("could not parse key from ext4\n");
return -EINVAL;
}
key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
if (!key_info->key_size) {
pr_err("could not parse key size from ext4\n");
return -EINVAL;
}
key_info->salt = fscrypt_get_ice_encryption_salt(inode);
if (!key_info->salt) {
pr_err("could not parse salt from ext4\n");
return -EINVAL;
}
key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
if (!key_info->salt_size) {
pr_err("could not parse salt size from ext4\n");
return -EINVAL;
}
ret = pfk_ext4_parse_cipher(inode, algo);
if (ret != 0) {
pr_err("not supported cipher\n");
return ret;
}
return 0;
}
bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
const struct inode *inode2)
{
/* if there is no ext4 pfk, don't disallow merging blocks */
if (!pfk_ext4_is_ready())
return true;
if (!inode1 || !inode2)
return false;
return fscrypt_is_ice_encryption_info_equal(inode1, inode2);
}

Some files were not shown because too many files have changed in this diff