/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/completion.h>
#include "internal.h"

LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);

BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);

struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
	return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);

void crypto_mod_put(struct crypto_alg *alg)
{
	struct module *module = alg->cra_module;

	crypto_alg_put(alg);
	module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);

static inline int crypto_is_test_larval(struct crypto_larval *larval)
{
	return larval->alg.cra_driver_name[0];
}

static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
					      u32 mask)
{
	struct crypto_alg *q, *alg = NULL;
	int best = -2;

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int exact, fuzzy;

		if (crypto_is_moribund(q))
			continue;

		if ((q->cra_flags ^ type) & mask)
			continue;

		if (crypto_is_larval(q) &&
		    !crypto_is_test_larval((struct crypto_larval *)q) &&
		    ((struct crypto_larval *)q)->mask != mask)
			continue;

		exact = !strcmp(q->cra_driver_name, name);
		fuzzy = !strcmp(q->cra_name, name);
		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		best = q->cra_priority;
		if (alg)
			crypto_mod_put(alg);
		alg = q;

		if (exact)
			break;
	}

	return alg;
}

static void crypto_larval_destroy(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	BUG_ON(!crypto_is_larval(alg));
	if (larval->adult)
		crypto_mod_put(larval->adult);
	kfree(larval);
}

struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
{
	struct crypto_larval *larval;

	larval = kzalloc(sizeof(*larval), GFP_KERNEL);
	if (!larval)
		return ERR_PTR(-ENOMEM);

	larval->mask = mask;
	larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
	larval->alg.cra_priority = -1;
	larval->alg.cra_destroy = crypto_larval_destroy;

	strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
	init_completion(&larval->completion);

	return larval;
}
EXPORT_SYMBOL_GPL(crypto_larval_alloc);

static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
					    u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_larval *larval;

	larval = crypto_larval_alloc(name, type, mask);
	if (IS_ERR(larval))
		return ERR_CAST(larval);

	atomic_set(&larval->alg.cra_refcnt, 2);

	down_write(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	if (!alg) {
		alg = &larval->alg;
		list_add(&alg->cra_list, &crypto_alg_list);
	}
	up_write(&crypto_alg_sem);

	if (alg != &larval->alg) {
		kfree(larval);
		if (crypto_is_larval(alg))
			alg = crypto_larval_wait(alg);
	}

	return alg;
}

void crypto_larval_kill(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	down_write(&crypto_alg_sem);
	list_del(&alg->cra_list);
	up_write(&crypto_alg_sem);
	complete_all(&larval->completion);
	crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;
	long timeout;

	timeout = wait_for_completion_killable_timeout(
		&larval->completion, 60 * HZ);

	alg = larval->adult;
	if (timeout < 0)
		alg = ERR_PTR(-EINTR);
	else if (!timeout)
		alg = ERR_PTR(-ETIMEDOUT);
	else if (!alg)
		alg = ERR_PTR(-ENOENT);
	else if (crypto_is_test_larval(larval) &&
		 !(alg->cra_flags & CRYPTO_ALG_TESTED))
		alg = ERR_PTR(-EAGAIN);
	else if (!crypto_mod_get(alg))
		alg = ERR_PTR(-EAGAIN);
	crypto_mod_put(&larval->alg);

	return alg;
}

struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	down_read(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	up_read(&crypto_alg_sem);

	return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_lookup);

struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	if (!name)
		return ERR_PTR(-ENOENT);

	type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);

	alg = crypto_alg_lookup(name, type, mask);
	if (!alg && !(mask & CRYPTO_NOLOAD)) {
		request_module("crypto-%s", name);

		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
		      CRYPTO_ALG_NEED_FALLBACK))
			request_module("crypto-%s-all", name);

		alg = crypto_alg_lookup(name, type, mask);
	}

	if (alg)
		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;

	return crypto_larval_add(name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_larval_lookup);

int crypto_probing_notify(unsigned long val, void *v)
{
	int ok;

	ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	if (ok == NOTIFY_DONE) {
		request_module("cryptomgr");
		ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	}

	return ok;
}
EXPORT_SYMBOL_GPL(crypto_probing_notify);

struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_alg *larval;
	int ok;

	if (!((type | mask) & CRYPTO_ALG_TESTED)) {
		type |= CRYPTO_ALG_TESTED;
		mask |= CRYPTO_ALG_TESTED;
	}

	/*
	 * If the internal flag is set for a cipher, require a caller to
	 * invoke the cipher with the internal flag to use that cipher.
	 * Also, if a caller wants to allocate a cipher that may or may
	 * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
	 * !(mask & CRYPTO_ALG_INTERNAL).  An illustrative sketch of this
	 * flag usage follows the function below.
	 */
	if (!((type | mask) & CRYPTO_ALG_INTERNAL))
		mask |= CRYPTO_ALG_INTERNAL;

	larval = crypto_larval_lookup(name, type, mask);
	if (IS_ERR(larval) || !crypto_is_larval(larval))
		return larval;

	ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);

	if (ok == NOTIFY_STOP)
		alg = crypto_larval_wait(larval);
	else {
		crypto_mod_put(larval);
		alg = ERR_PTR(-ENOENT);
	}
	crypto_larval_kill(larval);
	return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
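
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the CRYPTO_ALG_INTERNAL convention described in the comment above.
 * Passing the flag in the type but leaving it out of the mask means the
 * "(q->cra_flags ^ type) & mask" test in __crypto_alg_lookup() ignores the
 * bit, so internal and non-internal implementations may both match.  The
 * helper name is hypothetical.
 */
static __maybe_unused struct crypto_alg *
crypto_lookup_maybe_internal(const char *name)
{
	/*
	 * On success the caller holds a module/algorithm reference and must
	 * drop it with crypto_mod_put() when done.
	 */
	return crypto_alg_mod_lookup(name, CRYPTO_ALG_INTERNAL, 0);
}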

static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;

	if (type_obj)
		return type_obj->init(tfm, type, mask);

	switch (crypto_tfm_alg_type(tfm)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		return crypto_init_cipher_ops(tfm);

	case CRYPTO_ALG_TYPE_COMPRESS:
		return crypto_init_compress_ops(tfm);

	default:
		break;
	}

	BUG();
	return -EINVAL;
}

static void crypto_exit_ops(struct crypto_tfm *tfm)
{
	const struct crypto_type *type = tfm->__crt_alg->cra_type;

	if (type && tfm->exit)
		tfm->exit(tfm);
}

static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = alg->cra_type;
	unsigned int len;

	len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	if (type_obj)
		return len + type_obj->ctxsize(alg, type, mask);

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		BUG();

	case CRYPTO_ALG_TYPE_CIPHER:
		len += crypto_cipher_ctxsize(alg);
		break;

	case CRYPTO_ALG_TYPE_COMPRESS:
		len += crypto_compress_ctxsize(alg);
		break;
	}

	return len;
}

static void crypto_shoot_alg(struct crypto_alg *alg)
{
	down_write(&crypto_alg_sem);
	alg->cra_flags |= CRYPTO_ALG_DYING;
	up_write(&crypto_alg_sem);
}

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
				      u32 mask)
{
	struct crypto_tfm *tfm = NULL;
	unsigned int tfm_size;
	int err = -ENOMEM;

	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
	tfm = kzalloc(tfm_size, GFP_KERNEL);
	if (tfm == NULL)
		goto out_err;

	tfm->__crt_alg = alg;

	err = crypto_init_ops(tfm, type, mask);
	if (err)
		goto out_free_tfm;

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
out_free_tfm:
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(tfm);
out_err:
	tfm = ERR_PTR(err);
out:
	return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);

/*
 * crypto_alloc_base - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 *
 * This function should not be used by new algorithm types.
 * Please use crypto_alloc_tfm instead.
 *
 * crypto_alloc_base() will first attempt to locate an already loaded
 * algorithm. If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias. If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly. A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of a non-determinate type. Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_blkcipher.
 *
 * In case of error the return value is an error pointer.
 */
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_alg_mod_lookup(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
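
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file) for the lookup/allocate/free cycle documented above.  The "aes"
 * name and the helper itself are only examples; most callers should use a
 * type-specific allocator such as crypto_alloc_shash() instead.
 */
static __maybe_unused int crypto_alloc_base_example(void)
{
	struct crypto_tfm *tfm;

	/* Looks up "aes", loading modules or asking cryptomgr if needed. */
	tfm = crypto_alloc_base("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * ... use the transform ...
	 *
	 * A tfm from crypto_alloc_base() starts at the allocation itself,
	 * so it doubles as the @mem argument of crypto_destroy_tfm().
	 */
	crypto_destroy_tfm(tfm, tfm);
	return 0;
}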

void *crypto_create_tfm(struct crypto_alg *alg,
			const struct crypto_type *frontend)
{
	char *mem;
	struct crypto_tfm *tfm = NULL;
	unsigned int tfmsize;
	unsigned int total;
	int err = -ENOMEM;

	tfmsize = frontend->tfmsize;
	total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);

	mem = kzalloc(total, GFP_KERNEL);
	if (mem == NULL)
		goto out_err;

	tfm = (struct crypto_tfm *)(mem + tfmsize);
	tfm->__crt_alg = alg;

	err = frontend->init_tfm(tfm);
	if (err)
		goto out_free_tfm;

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
out_free_tfm:
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(mem);
out_err:
	mem = ERR_PTR(err);
out:
	return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm);

struct crypto_alg *crypto_find_alg(const char *alg_name,
				   const struct crypto_type *frontend,
				   u32 type, u32 mask)
{
	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
		crypto_alg_mod_lookup;

	if (frontend) {
		type &= frontend->maskclear;
		mask &= frontend->maskclear;
		type |= frontend->type;
		mask |= frontend->maskset;

		if (frontend->lookup)
			lookup = frontend->lookup;
	}

	return lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);

/*
 * crypto_alloc_tfm - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @frontend: Frontend algorithm type
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 *
 * crypto_alloc_tfm() will first attempt to locate an already loaded
 * algorithm. If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias. If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly. A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of a non-determinate type. Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_blkcipher.
 *
 * In case of error the return value is an error pointer.
 */
void *crypto_alloc_tfm(const char *alg_name,
		       const struct crypto_type *frontend, u32 type, u32 mask)
{
	void *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_find_alg(alg_name, frontend, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = crypto_create_tfm(alg, frontend);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
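
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a type-specific allocator is typically layered on crypto_alloc_tfm().
 * A real frontend passes its own struct crypto_type (for example the shash
 * frontend in crypto/shash.c); the wrapper below takes the frontend as a
 * parameter and is purely hypothetical.
 */
static __maybe_unused void *
crypto_alloc_frontend_example(const char *alg_name,
			      const struct crypto_type *frontend,
			      u32 type, u32 mask)
{
	/*
	 * The frontend contributes type/mask fixups in crypto_find_alg() and
	 * per-type context sizing and init in crypto_create_tfm().
	 */
	return crypto_alloc_tfm(alg_name, frontend, type, mask);
}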

/*
 * crypto_destroy_tfm - Free crypto transform
 * @mem: Start of tfm slab
 * @tfm: Transform to free
 *
 * This function frees up the transform and any associated resources,
 * then drops the refcount on the associated algorithm.
 */
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
	struct crypto_alg *alg;

	if (unlikely(!mem))
		return;

	alg = tfm->__crt_alg;

	if (!tfm->exit && alg->cra_exit)
		alg->cra_exit(tfm);
	crypto_exit_ops(tfm);
	crypto_mod_put(alg);
	kzfree(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);

int crypto_has_alg(const char *name, u32 type, u32 mask)
{
	int ret = 0;
	struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);

	if (!IS_ERR(alg)) {
		crypto_mod_put(alg);
		ret = 1;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);

void crypto_req_done(struct crypto_async_request *req, int err)
{
	struct crypto_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;

	wait->err = err;
	complete(&wait->completion);
}
EXPORT_SYMBOL_GPL(crypto_req_done);
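
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the typical pairing of crypto_req_done() with crypto_wait_req() and
 * DECLARE_CRYPTO_WAIT() from <linux/crypto.h>.  Real users normally install
 * the callback through a type-specific helper such as
 * skcipher_request_set_callback(); "example_submit" is a hypothetical
 * stand-in for a submission call such as crypto_skcipher_encrypt().
 */
static __maybe_unused int
crypto_wait_example(struct crypto_async_request *req,
		    int (*example_submit)(struct crypto_async_request *req))
{
	DECLARE_CRYPTO_WAIT(wait);

	/* Route the asynchronous completion to crypto_req_done(). */
	req->complete = crypto_req_done;
	req->data = &wait;

	/*
	 * crypto_wait_req() returns synchronous results directly and sleeps
	 * on wait.completion (completed by crypto_req_done()) when the
	 * submission returns -EINPROGRESS or -EBUSY.
	 */
	return crypto_wait_req(example_submit(req), &wait);
}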

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");