Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
Merge remote-tracking branch 'google/android-4.14-stable' into richelieu
* google/android-4.14-stable: UPSTREAM: lib/vsprintf: Hash printed address for netdev bits fallback UPSTREAM: lib/vsprintf: Prepare for more general use of ptr_to_id() UPSTREAM: lib/vsprintf: Make ptr argument conts in ptr_to_id() UPSTREAM: vsprintf: Replace memory barrier with static_key for random_ptr_key update UPSTREAM: lib/test_printf.c: accept "ptrval" as valid result for plain 'p' tests UPSTREAM: lib/vsprintf: Do not handle %pO[^F] as %px BACKPORT: l2tp: fix race in pppol2tp_release with session object destroy BACKPORT: l2tp: don't use inet_shutdown on ppp session destroy Linux 4.14.285 tcp: drop the hash_32() part from the index calculation tcp: increase source port perturb table to 2^16 tcp: dynamically allocate the perturb table used by source ports tcp: add small random increments to the source port tcp: use different parts of the port_offset for index and offset tcp: add some entropy in __inet_hash_connect() xprtrdma: fix incorrect header size calculations usb: gadget: u_ether: fix regression in setting fixed MAC address s390/mm: use non-quiescing sske for KVM switch to keyed guest l2tp: fix race in pppol2tp_release with session object destroy l2tp: don't use inet_shutdown on ppp session destroy virtio-pci: Remove wrong address verification in vp_del_vqs() ext4: add reserved GDT blocks check ext4: make variable "count" signed ext4: fix bug_on ext4_mb_use_inode_pa serial: 8250: Store to lsr_save_flags after lsr read usb: gadget: lpc32xx_udc: Fix refcount leak in lpc32xx_udc_probe usb: dwc2: Fix memory leak in dwc2_hcd_init USB: serial: io_ti: add Agilent E5805A support USB: serial: option: add support for Cinterion MV31 with new baseline comedi: vmk80xx: fix expression for tx buffer size irqchip/gic/realview: Fix refcount leak in realview_gic_of_init certs/blacklist_hashes.c: fix const confusion in certs blacklist arm64: ftrace: fix branch range checks net: bgmac: Fix an erroneous kfree() in bgmac_remove() misc: atmel-ssc: Fix IRQ check in ssc_probe tty: goldfish: Fix free_irq() on remove i40e: Fix call trace in setup_tx_descriptors pNFS: Don't keep retrying if the server replied NFS4ERR_LAYOUTUNAVAILABLE random: credit cpu and bootloader seeds by default net: ethernet: mtk_eth_soc: fix misuse of mem alloc interface netdev[napi]_alloc_frag ipv6: Fix signed integer overflow in l2tp_ip6_sendmsg nfc: nfcmrvl: Fix memory leak in nfcmrvl_play_deferred virtio-mmio: fix missing put_device() when vm_cmdline_parent registration failed scsi: pmcraid: Fix missing resource cleanup in error case scsi: ipr: Fix missing/incorrect resource cleanup in error case scsi: lpfc: Fix port stuck in bypassed state after LIP in PT2PT topology scsi: vmw_pvscsi: Expand vcpuHint to 16 bits ASoC: wm8962: Fix suspend while playing music ata: libata-core: fix NULL pointer deref in ata_host_alloc_pinfo() ASoC: cs42l56: Correct typo in minimum level for SX volume controls ASoC: cs42l52: Correct TLV for Bypass Volume ASoC: cs53l30: Correct number of volume levels on SX controls ASoC: cs42l52: Fix TLV scales for mixer controls random: account for arch randomness in bits random: mark bootloader randomness code as __init random: avoid checking crng_ready() twice in random_init() crypto: drbg - make reseeding from get_random_bytes() synchronous crypto: drbg - always try to free Jitter RNG instance crypto: drbg - move dynamic ->reseed_threshold adjustments to __drbg_seed() crypto: drbg - track whether DRBG was seeded with !rng_is_initialized() crypto: drbg - prepare for more fine-grained tracking of seeding state 
crypto: drbg - always seeded with SP800-90B compliant noise source crypto: drbg - add FIPS 140-2 CTRNG for noise source Revert "random: use static branch for crng_ready()" random: check for signals after page of pool writes random: wire up fops->splice_{read,write}_iter() random: convert to using fops->write_iter() random: move randomize_page() into mm where it belongs random: move initialization functions out of hot pages random: use proper return types on get_random_{int,long}_wait() random: remove extern from functions in header random: use static branch for crng_ready() random: credit architectural init the exact amount random: handle latent entropy and command line from random_init() random: use proper jiffies comparison macro random: remove ratelimiting for in-kernel unseeded randomness random: avoid initializing twice in credit race random: use symbolic constants for crng_init states siphash: use one source of truth for siphash permutations random: help compiler out with fast_mix() by using simpler arguments random: do not use input pool from hard IRQs random: order timer entropy functions below interrupt functions random: do not pretend to handle premature next security model random: do not use batches when !crng_ready() random: insist on random_get_entropy() existing in order to simplify xtensa: use fallback for random_get_entropy() instead of zero sparc: use fallback for random_get_entropy() instead of zero um: use fallback for random_get_entropy() instead of zero x86/tsc: Use fallback for random_get_entropy() instead of zero nios2: use fallback for random_get_entropy() instead of zero arm: use fallback for random_get_entropy() instead of zero mips: use fallback for random_get_entropy() instead of just c0 random m68k: use fallback for random_get_entropy() instead of zero timekeeping: Add raw clock fallback for random_get_entropy() powerpc: define get_cycles macro for arch-override alpha: define get_cycles macro for arch-override parisc: define get_cycles macro for arch-override s390: define get_cycles macro for arch-override ia64: define get_cycles macro for arch-override init: call time_init() before rand_initialize() random: fix sysctl documentation nits random: document crng_fast_key_erasure() destination possibility random: make random_get_entropy() return an unsigned long random: check for signals every PAGE_SIZE chunk of /dev/[u]random random: check for signal_pending() outside of need_resched() check random: do not allow user to keep crng key around on stack random: do not split fast init input in add_hwgenerator_randomness() random: mix build-time latent entropy into pool at init random: re-add removed comment about get_random_{u32,u64} reseeding random: treat bootloader trust toggle the same way as cpu trust toggle random: skip fast_init if hwrng provides large chunk of entropy random: check for signal and try earlier when generating entropy random: reseed more often immediately after booting random: make consistent usage of crng_ready() random: use SipHash as interrupt entropy accumulator random: replace custom notifier chain with standard one random: don't let 644 read-only sysctls be written to random: give sysctl_random_min_urandom_seed a more sensible value random: do crng pre-init loading in worker rather than irq random: unify cycles_t and jiffies usage and types random: cleanup UUID handling random: only wake up writers after zap if threshold was passed random: round-robin registers as ulong, not u32 random: clear fast pool, crng, and batches in cpuhp bring up 
random: pull add_hwgenerator_randomness() declaration into random.h random: check for crng_init == 0 in add_device_randomness() random: unify early init crng load accounting random: do not take pool spinlock at boot random: defer fast pool mixing to worker random: rewrite header introductory comment random: group sysctl functions random: group userspace read/write functions random: group entropy collection functions random: group entropy extraction functions random: group initialization wait functions random: remove whitespace and reorder includes random: remove useless header comment random: introduce drain_entropy() helper to declutter crng_reseed() random: deobfuscate irq u32/u64 contributions random: add proper SPDX header random: remove unused tracepoints random: remove ifdef'd out interrupt bench random: tie batched entropy generation to base_crng generation random: zero buffer after reading entropy from userspace random: remove outdated INT_MAX >> 6 check in urandom_read() random: use hash function for crng_slow_load() random: absorb fast pool into input pool after fast load random: do not xor RDRAND when writing into /dev/random random: ensure early RDSEED goes through mixer on init random: inline leaves of rand_initialize() random: use RDSEED instead of RDRAND in entropy extraction random: fix locking in crng_fast_load() random: remove batched entropy locking random: remove use_input_pool parameter from crng_reseed() random: make credit_entropy_bits() always safe random: always wake up entropy writers after extraction random: use linear min-entropy accumulation crediting random: simplify entropy debiting random: use computational hash for entropy extraction random: only call crng_finalize_init() for primary_crng random: access primary_pool directly rather than through pointer random: continually use hwgenerator randomness random: simplify arithmetic function flow in account() random: access input_pool_data directly rather than through pointer random: cleanup fractional entropy shift constants random: prepend remaining pool constants with POOL_ random: de-duplicate INPUT_POOL constants random: remove unused OUTPUT_POOL constants random: rather than entropy_store abstraction, use global random: try to actively add entropy rather than passively wait for it random: remove unused extract_entropy() reserved argument random: remove incomplete last_data logic random: cleanup integer types crypto: chacha20 - Fix chacha20_block() keystream alignment (again) random: cleanup poolinfo abstraction random: fix typo in comments random: don't reset crng_init_cnt on urandom_read() random: avoid superfluous call to RDRAND in CRNG extraction random: early initialization of ChaCha constants random: initialize ChaCha20 constants with correct endianness random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs random: harmonize "crng init done" messages random: mix bootloader randomness into pool random: do not re-init if crng_reseed completes before primary init random: do not sign extend bytes for rotation when mixing random: use BLAKE2s instead of SHA1 in extraction random: remove unused irq_flags argument from add_interrupt_randomness() random: document add_hwgenerator_randomness() with other input functions crypto: blake2s - adjust include guard naming crypto: blake2s - include <linux/bug.h> instead of <asm/bug.h> MAINTAINERS: co-maintain random.c random: remove dead code left over from blocking pool random: avoid arch_get_random_seed_long() when collecting IRQ randomness random: add 
arch_get_random_*long_early() powerpc: Use bool in archrandom.h linux/random.h: Mark CONFIG_ARCH_RANDOM functions __must_check linux/random.h: Use false with bool linux/random.h: Remove arch_has_random, arch_has_random_seed s390: Remove arch_has_random, arch_has_random_seed powerpc: Remove arch_has_random, arch_has_random_seed x86: Remove arch_has_random, arch_has_random_seed random: avoid warnings for !CONFIG_NUMA builds random: split primary/secondary crng init paths random: remove some dead code of poolinfo random: fix typo in add_timer_randomness() random: Add and use pr_fmt() random: convert to ENTROPY_BITS for better code readability random: remove unnecessary unlikely() random: remove kernel.random.read_wakeup_threshold random: delete code to pull data into pools random: remove the blocking pool random: fix crash on multiple early calls to add_bootloader_randomness() char/random: silence a lockdep splat with printk() random: make /dev/random be almost like /dev/urandom random: ignore GRND_RANDOM in getentropy(2) random: add GRND_INSECURE to return best-effort non-cryptographic bytes random: Add a urandom_read_nowait() for random APIs that don't warn random: Don't wake crng_init_wait when crng_init == 1 lib/crypto: sha1: re-roll loops to reduce code size lib/crypto: blake2s: move hmac construction into wireguard crypto: blake2s - generic C library implementation and selftest crypto: Deduplicate le32_to_cpu_array() and cpu_to_le32_array() Revert "hwrng: core - Freeze khwrng thread during suspend" char/random: Add a newline at the end of the file random: Use wait_event_freezable() in add_hwgenerator_randomness() fdt: add support for rng-seed random: Support freezable kthreads in add_hwgenerator_randomness() random: fix soft lockup when trying to read from an uninitialized blocking pool latent_entropy: avoid build error when plugin cflags are not set random: document get_random_int() family random: move rand_initialize() earlier random: only read from /dev/random after its pool has received 128 bits drivers/char/random.c: make primary_crng static drivers/char/random.c: remove unused stuct poolinfo::poolbits drivers/char/random.c: constify poolinfo_table random: make CPU trust a boot parameter random: Make crng state queryable random: remove preempt disabled region random: add a config option to trust the CPU's hwrng random: Return nbytes filled from hw RNG random: Fix whitespace pre random-bytes work drivers/char/random.c: remove unused dont_count_entropy random: optimize add_interrupt_randomness random: always fill buffer in get_random_bytes_wait crypto: chacha20 - Fix keystream alignment for chacha20_block() 9p: missing chunk of "fs/9p: Don't update file type when updating file attributes" UPSTREAM: ext4: verify dir block before splitting it UPSTREAM: ext4: fix use-after-free in ext4_rename_dir_prepare BACKPORT: ext4: Only advertise encrypted_casefold when encryption and unicode are enabled BACKPORT: ext4: fix no-key deletion for encrypt+casefold BACKPORT: ext4: optimize match for casefolded encrypted dirs BACKPORT: ext4: handle casefolding with encryption Revert "ANDROID: ext4: Handle casefolding with encryption" Revert "ANDROID: ext4: Optimize match for casefolded encrypted dirs" Revert "ext4: fix use-after-free in ext4_rename_dir_prepare" Revert "ext4: verify dir block before splitting it" Linux 4.14.284 x86/speculation/mmio: Print SMT warning KVM: x86/speculation: Disable Fill buffer clear within guests x86/speculation/mmio: Reuse SRBDS mitigation for SBDS x86/speculation/srbds: 
Update SRBDS mitigation selection x86/speculation/mmio: Add sysfs reporting for Processor MMIO Stale Data x86/speculation/mmio: Enable CPU Fill buffer clearing on idle x86/bugs: Group MDS, TAA & Processor MMIO Stale Data mitigations x86/speculation/mmio: Add mitigation for Processor MMIO Stale Data x86/speculation: Add a common function for MD_CLEAR mitigation update x86/speculation/mmio: Enumerate Processor MMIO Stale Data bug Documentation: Add documentation for Processor MMIO Stale Data x86/cpu: Add another Alder Lake CPU to the Intel family x86/cpu: Add Lakefield, Alder Lake and Rocket Lake models to the to Intel CPU family x86/cpu: Add Comet Lake to the Intel CPU models header x86/CPU: Add more Icelake model numbers x86/CPU: Add Icelake model number x86/cpu: Add Cannonlake to Intel family x86/cpu: Add Jasper Lake to Intel family cpu/speculation: Add prototype for cpu_show_srbds() x86/cpu: Add Elkhart Lake to Intel family Linux 4.14.283 tcp: fix tcp_mtup_probe_success vs wrong snd_cwnd PCI: qcom: Fix unbalanced PHY init on probe errors mtd: cfi_cmdset_0002: Use chip_ready() for write on S29GL064N mtd: cfi_cmdset_0002: Move and rename chip_check/chip_ready/chip_good_for_write md/raid0: Ignore RAID0 layout if the second zone has only one device powerpc/32: Fix overread/overwrite of thread_struct via ptrace Input: bcm5974 - set missing URB_NO_TRANSFER_DMA_MAP urb flag ixgbe: fix unexpected VLAN Rx in promisc mode on VF ixgbe: fix bcast packets Rx on VF after promisc removal nfc: st21nfca: fix memory leaks in EVT_TRANSACTION handling nfc: st21nfca: fix incorrect validating logic in EVT_TRANSACTION ata: libata-transport: fix {dma|pio|xfer}_mode sysfs files cifs: return errors during session setup during reconnects ALSA: hda/conexant - Fix loopback issue with CX20632 vringh: Fix loop descriptors check in the indirect cases nodemask: Fix return values to be unsigned nbd: fix io hung while disconnecting device nbd: fix race between nbd_alloc_config() and module removal nbd: call genl_unregister_family() first in nbd_cleanup() modpost: fix undefined behavior of is_arm_mapping_symbol() drm/radeon: fix a possible null pointer dereference Revert "net: af_key: add check for pfkey_broadcast in function pfkey_process" md: protect md_unregister_thread from reentrancy kernfs: Separate kernfs_pr_cont_buf and rename_lock. 
serial: msm_serial: disable interrupts in __msm_console_write() staging: rtl8712: fix uninit-value in r871xu_drv_init() clocksource/drivers/sp804: Avoid error on multiple instances extcon: Modify extcon device to be created after driver data is set misc: rtsx: set NULL intfdata when probe fails usb: dwc2: gadget: don't reset gadget's driver->bus USB: hcd-pci: Fully suspend across freeze/thaw cycle drivers: usb: host: Fix deadlock in oxu_bus_suspend() drivers: tty: serial: Fix deadlock in sa1100_set_termios() USB: host: isp116x: check return value after calling platform_get_resource() drivers: staging: rtl8192e: Fix deadlock in rtllib_beacons_stop() tty: Fix a possible resource leak in icom_probe tty: synclink_gt: Fix null-pointer-dereference in slgt_clean() lkdtm/usercopy: Expand size of "out of frame" object iio: dummy: iio_simple_dummy: check the return value of kstrdup() drm: imx: fix compiler warning with gcc-12 net: altera: Fix refcount leak in altera_tse_mdio_create net: ipv6: unexport __init-annotated seg6_hmac_init() net: xfrm: unexport __init-annotated xfrm4_protocol_init() net: mdio: unexport __init-annotated mdio_bus_init() SUNRPC: Fix the calculation of xdr->end in xdr_get_next_encode_buffer() net/mlx4_en: Fix wrong return value on ioctl EEPROM query failure ata: pata_octeon_cf: Fix refcount leak in octeon_cf_probe xprtrdma: treat all calls not a bcall when bc_serv is NULL video: fbdev: pxa3xx-gcu: release the resources correctly in pxa3xx_gcu_probe/remove() m68knommu: fix undefined reference to `_init_sp' m68knommu: set ZERO_PAGE() to the allocated zeroed page i2c: cadence: Increase timeout per message if necessary tracing: Avoid adding tracer option before update_tracer_options tracing: Fix sleeping function called from invalid context on RT kernel mips: cpc: Fix refcount leak in mips_cpc_default_phys_base perf c2c: Fix sorting in percent_rmt_hitm_cmp() tcp: tcp_rtx_synack() can be called from process context ubi: ubi_create_volume: Fix use-after-free when volume creation failed jffs2: fix memory leak in jffs2_do_fill_super modpost: fix removing numeric suffixes net: dsa: mv88e6xxx: Fix refcount leak in mv88e6xxx_mdios_register net: ethernet: mtk_eth_soc: out of bounds read in mtk_hwlro_get_fdir_entry() clocksource/drivers/oxnas-rps: Fix irq_of_parse_and_map() return value firmware: dmi-sysfs: Fix memory leak in dmi_sysfs_register_handle serial: st-asc: Sanitize CSIZE and correct PARENB for CS7 serial: sh-sci: Don't allow CS5-6 serial: txx9: Don't allow CS5-6 serial: digicolor-usart: Don't allow CS5-6 serial: meson: acquire port->lock in startup() rtc: mt6397: check return value after calling platform_get_resource() soc: rockchip: Fix refcount leak in rockchip_grf_init coresight: cpu-debug: Replace mutex with mutex_trylock on panic notifier rpmsg: qcom_smd: Fix irq_of_parse_and_map() return value pwm: lp3943: Fix duty calculation in case period was clamped USB: storage: karma: fix rio_karma_init return usb: usbip: add missing device lock on tweak configuration cmd usb: usbip: fix a refcount leak in stub_probe() tty: goldfish: Use tty_port_destroy() to destroy port staging: greybus: codecs: fix type confusion of list iterator variable pcmcia: db1xxx_ss: restrict to MIPS_DB1XXX boards netfilter: nf_tables: disallow non-stateful expression in sets earlier MIPS: IP27: Remove incorrect `cpu_has_fpu' override RDMA/rxe: Generate a completion for unsupported/invalid opcode phy: qcom-qmp: fix reset-controller leak on probe errors dt-bindings: gpio: altera: correct interrupt-cells 
docs/conf.py: Cope with removal of language=None in Sphinx 5.0.0 phy: qcom-qmp: fix struct clk leak on probe errors arm64: dts: qcom: ipq8074: fix the sleep clock frequency gma500: fix an incorrect NULL check on list iterator carl9170: tx: fix an incorrect use of list iterator ASoC: rt5514: Fix event generation for "DSP Voice Wake Up" control rtl818x: Prevent using not initialized queues hugetlb: fix huge_pmd_unshare address update nodemask.h: fix compilation error with GCC12 iommu/msm: Fix an incorrect NULL check on list iterator um: Fix out-of-bounds read in LDT setup um: chan_user: Fix winch_tramp() return value mac80211: upgrade passive scan to active scan on DFS channels after beacon rx irqchip: irq-xtensa-mx: fix initial IRQ affinity irqchip/armada-370-xp: Do not touch Performance Counter Overflow on A375, A38x, A39x RDMA/hfi1: Fix potential integer multiplication overflow errors md: fix an incorrect NULL check in md_reload_sb md: fix an incorrect NULL check in does_sb_need_changing drm/bridge: analogix_dp: Grab runtime PM reference for DP-AUX drm/nouveau/clk: Fix an incorrect NULL check on list iterator drm/amdgpu/cs: make commands with 0 chunks illegal behaviour. scsi: ufs: qcom: Add a readl() to make sure ref_clk gets enabled scsi: dc395x: Fix a missing check on list iterator ocfs2: dlmfs: fix error handling of user_dlm_destroy_lock dlm: fix missing lkb refcount handling dlm: fix plock invalid read ext4: avoid cycles in directory h-tree ext4: verify dir block before splitting it ext4: fix bug_on in ext4_writepages ext4: fix use-after-free in ext4_rename_dir_prepare fs-writeback: writeback_sb_inodes:Recalculate 'wrote' according skipped pages iwlwifi: mvm: fix assert 1F04 upon reconfig wifi: mac80211: fix use-after-free in chanctx code perf jevents: Fix event syntax error caused by ExtSel perf c2c: Use stdio interface if slang is not supported iommu/amd: Increase timeout waiting for GA log enablement video: fbdev: clcdfb: Fix refcount leak in clcdfb_of_vram_setup iommu/mediatek: Add list_del in mtk_iommu_remove mailbox: forward the hrtimer if not queued and under a lock powerpc/fsl_rio: Fix refcount leak in fsl_rio_setup powerpc/perf: Fix the threshold compare group constraint for power9 Input: sparcspkr - fix refcount leak in bbc_beep_probe tty: fix deadlock caused by calling printk() under tty_port->lock powerpc/4xx/cpm: Fix return value of __setup() handler powerpc/idle: Fix return value of __setup() handler powerpc/8xx: export 'cpm_setbrg' for modules drivers/base/node.c: fix compaction sysfs file leak pinctrl: mvebu: Fix irq_of_parse_and_map() return value scsi: fcoe: Fix Wstringop-overflow warnings in fcoe_wwn_from_mac() mfd: ipaq-micro: Fix error check return value of platform_get_irq() ARM: dts: bcm2835-rpi-b: Fix GPIO line names ARM: dts: bcm2835-rpi-zero-w: Fix GPIO line name for Wifi/BT soc: qcom: smsm: Fix missing of_node_put() in smsm_parse_ipc soc: qcom: smp2p: Fix missing of_node_put() in smp2p_parse_ipc rxrpc: Don't try to resend the request if we're receiving the reply rxrpc: Fix listen() setting the bar too high for the prealloc rings ASoC: wm2000: fix missing clk_disable_unprepare() on error in wm2000_anc_transition() sctp: read sk->sk_bound_dev_if once in sctp_rcv() m68k: math-emu: Fix dependencies of math emulation support Bluetooth: fix dangling sco_conn and use-after-free in sco_sock_timeout media: pvrusb2: fix array-index-out-of-bounds in pvr2_i2c_core_init media: exynos4-is: Change clk_disable to clk_disable_unprepare media: st-delta: Fix PM disable depth 
imbalance in delta_probe regulator: pfuze100: Fix refcount leak in pfuze_parse_regulators_dt ASoC: mxs-saif: Fix refcount leak in mxs_saif_probe media: uvcvideo: Fix missing check to determine if element is found in list drm/msm: return an error pointer in msm_gem_prime_get_sg_table() x86/mm: Cleanup the control_va_addr_alignment() __setup handler irqchip/aspeed-i2c-ic: Fix irq_of_parse_and_map() return value x86: Fix return value of __setup handlers drm/rockchip: vop: fix possible null-ptr-deref in vop_bind() drm/msm/hdmi: check return value after calling platform_get_resource_byname() drm/msm/dsi: fix error checks and return values for DSI xmit functions x86/pm: Fix false positive kmemleak report in msr_build_context() fsnotify: fix wrong lockdep annotations inotify: show inotify mask flags in proc fdinfo ath9k_htc: fix potential out of bounds access with invalid rxstatus->rs_keyix spi: img-spfi: Fix pm_runtime_get_sync() error checking HID: hid-led: fix maximum brightness for Dream Cheeky efi: Add missing prototype for efi_capsule_setup_info NFC: NULL out the dev->rfkill to prevent UAF spi: spi-ti-qspi: Fix return value handling of wait_for_completion_timeout drm/mediatek: Fix mtk_cec_mask() x86/delay: Fix the wrong asm constraint in delay_loop() ASoC: mediatek: Fix missing of_node_put in mt2701_wm8960_machine_probe ASoC: mediatek: Fix error handling in mt8173_max98090_dev_probe ath9k: fix ar9003_get_eepmisc drm: fix EDID struct for old ARM OABI format RDMA/hfi1: Prevent panic when SDMA is disabled macintosh/via-pmu: Fix build failure when CONFIG_INPUT is disabled powerpc/xics: fix refcount leak in icp_opal_init() tracing: incorrect isolate_mote_t cast in mm_vmscan_lru_isolate PCI: Avoid pci_dev_lock() AB/BA deadlock with sriov_numvfs_store() ARM: hisi: Add missing of_node_put after of_find_compatible_node ARM: dts: exynos: add atmel,24c128 fallback to Samsung EEPROM ARM: versatile: Add missing of_node_put in dcscb_init fat: add ratelimit to fat*_ent_bread() ARM: OMAP1: clock: Fix UART rate reporting algorithm fs: jfs: fix possible NULL pointer dereference in dbFree() ARM: dts: ox820: align interrupt controller node name with dtschema eth: tg3: silence the GCC 12 array-bounds warning rxrpc: Return an error to sendmsg if call failed media: exynos4-is: Fix compile warning net: phy: micrel: Allow probing without .driver_data ASoC: rt5645: Fix errorenous cleanup order nvme-pci: fix a NULL pointer dereference in nvme_alloc_admin_tags openrisc: start CPU timer early in boot rtlwifi: Use pr_warn instead of WARN_ONCE ipmi:ssif: Check for NULL msg when handling events and messages dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATIOMIC s390/preempt: disable __preempt_count_add() optimization for PROFILE_ALL_BRANCHES ASoC: dapm: Don't fold register value changes into notifications ipv6: Don't send rs packets to the interface of ARPHRD_TUNNEL drm/amd/pm: fix the compile warning scsi: megaraid: Fix error check return value of register_chrdev() media: cx25821: Fix the warning when removing the module media: pci: cx23885: Fix the error handling in cx23885_initdev() media: venus: hfi: avoid null dereference in deinit ath9k: fix QCA9561 PA bias level drm/amd/pm: fix double free in si_parse_power_table() ALSA: jack: Access input_dev under mutex ACPICA: Avoid cache flush inside virtual machines ipw2x00: Fix potential NULL dereference in libipw_xmit() b43: Fix assigning negative value to unsigned variable b43legacy: Fix assigning negative value to unsigned variable mwifiex: add mutex lock for 
call in mwifiex_dfs_chan_sw_work_queue drm/virtio: fix NULL pointer dereference in virtio_gpu_conn_get_modes btrfs: repair super block num_devices automatically btrfs: add "0x" prefix for unsupported optional features ptrace: Reimplement PTRACE_KILL by always sending SIGKILL ptrace/xtensa: Replace PT_SINGLESTEP with TIF_SINGLESTEP USB: new quirk for Dell Gen 2 devices USB: serial: option: add Quectel BG95 modem binfmt_flat: do not stop relocating GOT entries prematurely on riscv
commit 9c642251ed
@@ -59,17 +59,18 @@ class
dma_mode
Transfer modes supported by the device when in DMA mode.
DMA transfer mode used by the device.
Mostly used by PATA device.

pio_mode
Transfer modes supported by the device when in PIO mode.
PIO transfer mode used by the device.
Mostly used by PATA device.

xfer_mode
Current transfer mode.
Mostly used by PATA device.

id
@@ -384,6 +384,7 @@ What: /sys/devices/system/cpu/vulnerabilities
		/sys/devices/system/cpu/vulnerabilities/srbds
		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
		/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
Date:		January 2018
Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description:	Information about CPU vulnerabilities
@@ -15,3 +15,4 @@ are configurable at compile, boot or run time.
   tsx_async_abort
   multihit.rst
   special-register-buffer-data-sampling.rst
   processor_mmio_stale_data.rst
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst (new file, 246 lines)
@@ -0,0 +1,246 @@
=========================================
Processor MMIO Stale Data Vulnerabilities
=========================================

Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O (MMIO) vulnerabilities that can expose data. The sequences of operations for exposing data range from simple to very complex. Because most of the vulnerabilities require the attacker to have access to MMIO, many environments are not affected. System environments using virtualization where MMIO access is provided to untrusted guests may need mitigation. These vulnerabilities are not transient execution attacks. However, these vulnerabilities may propagate stale data into core fill buffers where the data can subsequently be inferred by an unmitigated transient execution attack. Mitigation for these vulnerabilities includes a combination of microcode update and software changes, depending on the platform and usage model. Some of these mitigations are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or those used to mitigate Special Register Buffer Data Sampling (SRBDS).

Data Propagators
================
Propagators are operations that result in stale data being copied or moved from one microarchitectural buffer or register to another. Processor MMIO Stale Data Vulnerabilities are operations that may result in stale data being directly read into an architectural, software-visible state or sampled from a buffer or register.

Fill Buffer Stale Data Propagator (FBSDP)
-----------------------------------------
Stale data may propagate from fill buffers (FB) into the non-coherent portion of the uncore on some non-coherent writes. Fill buffer propagation by itself does not make stale data architecturally visible. Stale data must be propagated to a location where it is subject to reading or sampling.

Sideband Stale Data Propagator (SSDP)
-------------------------------------
The sideband stale data propagator (SSDP) is limited to the client (including Intel Xeon server E3) uncore implementation. The sideband response buffer is shared by all client cores. For non-coherent reads that go to sideband destinations, the uncore logic returns 64 bytes of data to the core, including both requested data and unrequested stale data, from a transaction buffer and the sideband response buffer. As a result, stale data from the sideband response and transaction buffers may now reside in a core fill buffer.

Primary Stale Data Propagator (PSDP)
------------------------------------
The primary stale data propagator (PSDP) is limited to the client (including Intel Xeon server E3) uncore implementation. Similar to the sideband response buffer, the primary response buffer is shared by all client cores. For some processors, MMIO primary reads will return 64 bytes of data to the core fill buffer including both requested data and unrequested stale data. This is similar to the sideband stale data propagator.

Vulnerabilities
===============
Device Register Partial Write (DRPW) (CVE-2022-21166)
-----------------------------------------------------
Some endpoint MMIO registers incorrectly handle writes that are smaller than the register size. Instead of aborting the write or only copying the correct subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than specified by the write transaction may be written to the register. On processors affected by FBSDP, this may expose stale data from the fill buffers of the core that created the write transaction.

Shared Buffers Data Sampling (SBDS) (CVE-2022-21125)
----------------------------------------------------
After propagators may have moved data around the uncore and copied stale data into client core fill buffers, processors affected by MFBDS can leak data from the fill buffer. It is limited to the client (including Intel Xeon server E3) uncore implementation.

Shared Buffers Data Read (SBDR) (CVE-2022-21123)
------------------------------------------------
It is similar to Shared Buffers Data Sampling (SBDS) except that the data is directly read into the architectural software-visible state. It is limited to the client (including Intel Xeon server E3) uncore implementation.

Affected Processors
===================
Not all the CPUs are affected by all the variants. For instance, most processors for the server market (excluding Intel Xeon E3 processors) are impacted by only Device Register Partial Write (DRPW).

Below is the list of affected Intel processors [#f1]_:

=================== ============ ==========
Common name         Family_Model Steppings
=================== ============ ==========
HASWELL_X           06_3FH       2,4
SKYLAKE_L           06_4EH       3
BROADWELL_X         06_4FH       All
SKYLAKE_X           06_55H       3,4,6,7,11
BROADWELL_D         06_56H       3,4,5
SKYLAKE             06_5EH       3
ICELAKE_X           06_6AH       4,5,6
ICELAKE_D           06_6CH       1
ICELAKE_L           06_7EH       5
ATOM_TREMONT_D      06_86H       All
LAKEFIELD           06_8AH       1
KABYLAKE_L          06_8EH       9 to 12
ATOM_TREMONT        06_96H       1
ATOM_TREMONT_L      06_9CH       0
KABYLAKE            06_9EH       9 to 13
COMETLAKE           06_A5H       2,3,5
COMETLAKE_L         06_A6H       0,1
ROCKETLAKE          06_A7H       1
=================== ============ ==========

If a CPU is in the affected processor list, but not affected by a variant, it is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later section, mitigation largely remains the same for all the variants, i.e. to clear the CPU fill buffers via the VERW instruction.

New bits in MSRs
================
Newer processors and microcode updates on existing affected processors added new bits to the IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate specific variants of Processor MMIO Stale Data vulnerabilities and mitigation capability.

MSR IA32_ARCH_CAPABILITIES
--------------------------
Bit 13 - SBDR_SSDP_NO - When set, the processor is not affected by either the Shared Buffers Data Read (SBDR) vulnerability or the sideband stale data propagator (SSDP).

Bit 14 - FBSDP_NO - When set, the processor is not affected by the Fill Buffer Stale Data Propagator (FBSDP).

Bit 15 - PSDP_NO - When set, the processor is not affected by the Primary Stale Data Propagator (PSDP).

Bit 17 - FB_CLEAR - When set, the VERW instruction will overwrite CPU fill buffer values as part of MD_CLEAR operations. Processors that do not enumerate MDS_NO (meaning they are affected by MDS) but that do enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate FB_CLEAR as part of their MD_CLEAR support.

Bit 18 - FB_CLEAR_CTRL - The processor supports read and write to MSR IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS bit can be set to cause the VERW instruction to not perform the FB_CLEAR action. Not all processors that support FB_CLEAR will support FB_CLEAR_CTRL.

MSR IA32_MCU_OPT_CTRL
---------------------
Bit 3 - FB_CLEAR_DIS - When set, the VERW instruction does not perform the FB_CLEAR action. This may be useful to reduce the performance impact of FB_CLEAR in cases where system software deems it warranted (for example, when performance is more critical, or the untrusted software has no MMIO access). Note that FB_CLEAR_DIS has no impact on enumeration (for example, it does not change FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors that enumerate FB_CLEAR.
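For illustration, the IA32_ARCH_CAPABILITIES bits listed above can be read from userspace through the msr driver; a minimal C sketch, assuming /dev/cpu/0/msr is present (CONFIG_X86_MSR) and the program runs as root::

   /* Minimal sketch: dump the MMIO Stale Data related bits of
    * IA32_ARCH_CAPABILITIES (MSR 0x10a) for CPU 0 via the msr driver.
    */
   #include <stdio.h>
   #include <stdint.h>
   #include <fcntl.h>
   #include <unistd.h>

   #define MSR_IA32_ARCH_CAPABILITIES 0x10a

   int main(void)
   {
           uint64_t cap;
           int fd = open("/dev/cpu/0/msr", O_RDONLY);

           if (fd < 0 || pread(fd, &cap, sizeof(cap), MSR_IA32_ARCH_CAPABILITIES) != sizeof(cap)) {
                   perror("msr");
                   return 1;
           }
           printf("SBDR_SSDP_NO=%d FBSDP_NO=%d PSDP_NO=%d FB_CLEAR=%d FB_CLEAR_CTRL=%d\n",
                  (int)(cap >> 13 & 1), (int)(cap >> 14 & 1), (int)(cap >> 15 & 1),
                  (int)(cap >> 17 & 1), (int)(cap >> 18 & 1));
           close(fd);
           return 0;
   }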
Mitigation
==========
Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the same mitigation strategy: force the CPU to clear the affected buffers before an attacker can extract the secrets.

This is achieved by using the otherwise unused and obsolete VERW instruction in combination with a microcode update. The microcode clears the affected CPU buffers when the VERW instruction is executed.

The kernel reuses the MDS function to invoke the buffer clearing:

   mds_clear_cpu_buffers()
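For reference, that helper is a thin wrapper around VERW; in this kernel it looks roughly like the following inline from arch/x86/include/asm/nospec-branch.h (shown here as a sketch)::

   static inline void mds_clear_cpu_buffers(void)
   {
           static const u16 ds = __KERNEL_DS;

           /*
            * The memory-operand form of VERW is used; it is the form
            * documented to trigger the microcode's buffer-clearing
            * side effect.
            */
           asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
   }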
On MDS affected CPUs, the kernel already invokes CPU buffer clearing on kernel/userspace, hypervisor/guest and C-state (idle) transitions. No additional mitigation is needed on such CPUs.

For CPUs not affected by MDS or TAA, mitigation is needed only against an attacker with MMIO capability. Therefore, VERW is not required for kernel/userspace. For the virtualization case, VERW is only needed at VMENTER for a guest with MMIO capability.

Mitigation points
-----------------
Return to user space
^^^^^^^^^^^^^^^^^^^^
Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation needed.

C-State transition
^^^^^^^^^^^^^^^^^^
Control register writes by the CPU during a C-state transition can propagate data from the fill buffer to uncore buffers. Execute VERW before the C-state transition to clear the CPU fill buffers.

Guest entry point
^^^^^^^^^^^^^^^^^
Same mitigation as MDS when the processor is also affected by MDS/TAA, otherwise execute VERW at VMENTER only for MMIO-capable guests. On CPUs not affected by MDS/TAA, a guest without MMIO access cannot extract secrets using Processor MMIO Stale Data vulnerabilities, so there is no need to execute VERW for such guests.

Mitigation control on the kernel command line
---------------------------------------------
The kernel command line allows control of the Processor MMIO Stale Data mitigations at boot time with the option "mmio_stale_data=". The valid arguments for this option are:

==========  =================================================================
full        If the CPU is vulnerable, enable mitigation; CPU buffer clearing
            on exit to userspace and when entering a VM. Idle transitions are
            protected as well. It does not automatically disable SMT.
full,nosmt  Same as full, with SMT disabled on vulnerable CPUs. This is the
            complete mitigation.
off         Disables mitigation completely.
==========  =================================================================

If the CPU is affected and mmio_stale_data=off is not supplied on the kernel command line, then the kernel selects the appropriate mitigation.

Mitigation status information
-----------------------------
The Linux kernel provides a sysfs interface to enumerate the current vulnerability status of the system: whether the system is vulnerable, and which mitigations are active. The relevant sysfs file is:

   /sys/devices/system/cpu/vulnerabilities/mmio_stale_data

The possible values in this file are:

.. list-table::

   * - 'Not affected'
     - The processor is not vulnerable
   * - 'Vulnerable'
     - The processor is vulnerable, but no mitigation enabled
   * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
     - The processor is vulnerable, but microcode is not updated. The mitigation is enabled on a best effort basis.
   * - 'Mitigation: Clear CPU buffers'
     - The processor is vulnerable and the CPU buffer clearing mitigation is enabled.

If the processor is vulnerable then the following information is appended to the above information:

========================  ===========================================
'SMT vulnerable'          SMT is enabled
'SMT disabled'            SMT is disabled
'SMT Host state unknown'  Kernel runs in a VM, Host SMT state unknown
========================  ===========================================
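A quick way to query the current state is simply to read that file; a minimal C sketch, equivalent to running cat on the sysfs path above::

   #include <stdio.h>

   int main(void)
   {
           char buf[128];
           FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");

           if (!f) {
                   perror("fopen");
                   return 1;
           }
           if (fgets(buf, sizeof(buf), f))
                   fputs(buf, stdout);     /* e.g. "Mitigation: Clear CPU buffers; SMT disabled" */
           fclose(f);
           return 0;
   }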
References
----------
.. [#f1] Affected Processors
         https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
@@ -2480,6 +2480,7 @@
			       kvm.nx_huge_pages=off [X86]
			       no_entry_flush [PPC]
			       no_uaccess_flush [PPC]
			       mmio_stale_data=off [X86]

			       Exceptions:
			       This does not have any effect on
@@ -2501,6 +2502,7 @@
			       Equivalent to: l1tf=flush,nosmt [X86]
					      mds=full,nosmt [X86]
					      tsx_async_abort=full,nosmt [X86]
					      mmio_stale_data=full,nosmt [X86]

	mminit_loglevel=
			[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -2510,6 +2512,40 @@
			log everything. Information is printed at KERN_DEBUG
			so loglevel=8 may also need to be specified.

	mmio_stale_data=
			[X86,INTEL] Control mitigation for the Processor
			MMIO Stale Data vulnerabilities.

			Processor MMIO Stale Data is a class of
			vulnerabilities that may expose data after an MMIO
			operation. Exposed data could originate or end in
			the same CPU buffers as affected by MDS and TAA.
			Therefore, similar to MDS and TAA, the mitigation
			is to clear the affected CPU buffers.

			This parameter controls the mitigation. The
			options are:

			full       - Enable mitigation on vulnerable CPUs

			full,nosmt - Enable mitigation and disable SMT on
				     vulnerable CPUs.

			off        - Unconditionally disable mitigation

			On MDS or TAA affected machines,
			mmio_stale_data=off can be prevented by an active
			MDS or TAA mitigation as these vulnerabilities are
			mitigated with the same mechanism so in order to
			disable this mitigation, you need to specify
			mds=off and tsx_async_abort=off too.

			Not specifying this option is equivalent to
			mmio_stale_data=full.

			For details see:
			Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst

	module.sig_enforce
			[KNL] When CONFIG_MODULE_SIG is set, this means that
			modules without (valid) signatures will fail to load.
@@ -3522,6 +3558,18 @@
	ramdisk_size=	[RAM] Sizes of RAM disks in kilobytes
			See Documentation/blockdev/ramdisk.txt.

	random.trust_cpu={on,off}
			[KNL] Enable or disable trusting the use of the
			CPU's random number generator (if available) to
			fully seed the kernel's CRNG. Default is controlled
			by CONFIG_RANDOM_TRUST_CPU.

	random.trust_bootloader={on,off}
			[KNL] Enable or disable trusting the use of a
			seed passed by the bootloader (if available) to
			fully seed the kernel's CRNG. Default is controlled
			by CONFIG_RANDOM_TRUST_BOOTLOADER.

	ras=option[,option,...]	[KNL] RAS-specific options

			cec_disable [X86]
@@ -96,7 +96,7 @@ finally:
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
@@ -9,8 +9,9 @@ Required properties:
  - The second cell is reserved and is currently unused.
- gpio-controller : Marks the device node as a GPIO controller.
- interrupt-controller: Mark the device node as an interrupt controller
- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware.
- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware.
  - The first cell is the GPIO offset number within the GPIO controller.
  - The second cell is the interrupt trigger type and level flags.
- interrupts: Specify the interrupt.
- altr,interrupt-type: Specifies the interrupt trigger type the GPIO
  hardware is synthesized. This field is required if the Altera GPIO controller
@@ -38,6 +39,6 @@ gpio_altr: gpio@0xff200000 {
	altr,interrupt-type = <IRQ_TYPE_EDGE_RISING>;
	#gpio-cells = <2>;
	gpio-controller;
	#interrupt-cells = <1>;
	#interrupt-cells = <2>;
	interrupt-controller;
};
Documentation/filesystems/ext4/ondisk/directory.rst (new file, 453 lines)
@@ -0,0 +1,453 @@
.. SPDX-License-Identifier: GPL-2.0

Directory Entries
-----------------

In an ext4 filesystem, a directory is more or less a flat file that maps an arbitrary byte string (usually ASCII) to an inode number on the filesystem. There can be many directory entries across the filesystem that reference the same inode number--these are known as hard links, and that is why hard links cannot reference files on other filesystems. As such, directory entries are found by reading the data block(s) associated with a directory file for the particular directory entry that is desired.

Linear (Classic) Directories
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

By default, each directory lists its entries in an “almost-linear” array. I write “almost” because it's not a linear array in the memory sense because directory entries are not split across filesystem blocks. Therefore, it is more accurate to say that a directory is a series of data blocks and that each block contains a linear array of directory entries. The end of each per-block array is signified by reaching the end of the block; the last entry in the block has a record length that takes it all the way to the end of the block. The end of the entire directory is of course signified by reaching the end of the file. Unused directory entries are signified by inode = 0. By default the filesystem uses ``struct ext4_dir_entry_2`` for directory entries unless the “filetype” feature flag is not set, in which case it uses ``struct ext4_dir_entry``.

The original directory entry format is ``struct ext4_dir_entry``, which is at most 263 bytes long, though on disk you'll need to reference ``dirent.rec_len`` to know for sure.

======  ======  ===================  =========================================================
Offset  Size    Name                 Description
======  ======  ===================  =========================================================
0x0     __le32  inode                Number of the inode that this directory entry points to.
0x4     __le16  rec_len              Length of this directory entry. Must be a multiple of 4.
0x6     __le16  name_len             Length of the file name.
0x8     char    name[EXT4_NAME_LEN]  File name.
======  ======  ===================  =========================================================

Since file names cannot be longer than 255 bytes, the new directory entry format shortens the rec_len field and uses the space for a file type flag, probably to avoid having to load every inode during directory tree traversal. This format is ``ext4_dir_entry_2``, which is at most 263 bytes long, though on disk you'll need to reference ``dirent.rec_len`` to know for sure.

======  ======  ===================  =========================================================
Offset  Size    Name                 Description
======  ======  ===================  =========================================================
0x0     __le32  inode                Number of the inode that this directory entry points to.
0x4     __le16  rec_len              Length of this directory entry.
0x6     __u8    name_len             Length of the file name.
0x7     __u8    file_type            File type code, see ftype_ table below.
0x8     char    name[EXT4_NAME_LEN]  File name.
======  ======  ===================  =========================================================
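Expressed as C structures, the two record formats above look as follows (a sketch that mirrors the tables; the in-tree declarations live in fs/ext4/ext4.h)::

   struct ext4_dir_entry {
           __le32  inode;                  /* Inode number */
           __le16  rec_len;                /* Directory entry length, multiple of 4 */
           __le16  name_len;               /* Name length */
           char    name[EXT4_NAME_LEN];    /* File name */
   };

   struct ext4_dir_entry_2 {
           __le32  inode;                  /* Inode number */
           __le16  rec_len;                /* Directory entry length */
           __u8    name_len;               /* Name length */
           __u8    file_type;              /* See the ftype table below */
           char    name[EXT4_NAME_LEN];    /* File name */
   };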
.. _ftype:

The directory file type is one of the following values:

=====  ======================
Value  Description
=====  ======================
0x0    Unknown.
0x1    Regular file.
0x2    Directory.
0x3    Character device file.
0x4    Block device file.
0x5    FIFO.
0x6    Socket.
0x7    Symbolic link.
=====  ======================

To support directories that are both encrypted and casefolded, we must also include hash information in the directory entry. We append ``ext4_extended_dir_entry_2`` to ``ext4_dir_entry_2`` except for the entries for dot and dotdot, which are kept the same. The structure follows immediately after ``name`` and is included in the size listed by ``rec_len``. If a directory entry uses this extension, it may be up to 271 bytes.

======  ======  ==========  =====================================
Offset  Size    Name        Description
======  ======  ==========  =====================================
0x0     __le32  hash        The hash of the directory name
0x4     __le32  minor_hash  The minor hash of the directory name
======  ======  ==========  =====================================

In order to add checksums to these classic directory blocks, a phony ``struct ext4_dir_entry`` is placed at the end of each leaf block to hold the checksum. The directory entry is 12 bytes long. The inode number and name_len fields are set to zero to fool old software into ignoring an apparently empty directory entry, and the checksum is stored in the place where the name normally goes. The structure is ``struct ext4_dir_entry_tail``:

======  ======  ==================  ==================================================
Offset  Size    Name                Description
======  ======  ==================  ==================================================
0x0     __le32  det_reserved_zero1  Inode number, which must be zero.
0x4     __le16  det_rec_len         Length of this directory entry, which must be 12.
0x6     __u8    det_reserved_zero2  Length of the file name, which must be zero.
0x7     __u8    det_reserved_ft     File type, which must be 0xDE.
0x8     __le32  det_checksum        Directory leaf block checksum.
======  ======  ==================  ==================================================
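The same tail record written as a C structure (a sketch mirroring the table; see fs/ext4/ext4.h)::

   struct ext4_dir_entry_tail {
           __le32  det_reserved_zero1;     /* Pretends to be an unused entry, inode = 0 */
           __le16  det_rec_len;            /* 12 */
           __u8    det_reserved_zero2;     /* Zero name length */
           __u8    det_reserved_ft;        /* 0xDE, fake file type */
           __le32  det_checksum;           /* Directory leaf block checksum */
   };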
The leaf directory block checksum is calculated against the FS UUID, the directory's inode number, the directory's inode generation number, and the entire directory entry block up to (but not including) the fake directory entry.

Hash Tree Directories
~~~~~~~~~~~~~~~~~~~~~

A linear array of directory entries isn't great for performance, so a new feature was added to ext3 to provide a faster (but peculiar) balanced tree keyed off a hash of the directory entry name. If the EXT4_INDEX_FL (0x1000) flag is set in the inode, this directory uses a hashed btree (htree) to organize and find directory entries. For backwards read-only compatibility with ext2, this tree is actually hidden inside the directory file, masquerading as “empty” directory data blocks! It was stated previously that the end of the linear directory entry table was signified with an entry pointing to inode 0; this is (ab)used to fool the old linear-scan algorithm into thinking that the rest of the directory block is empty so that it moves on.

The root of the tree always lives in the first data block of the directory. By ext2 custom, the '.' and '..' entries must appear at the beginning of this first block, so they are put here as two ``struct ext4_dir_entry_2``\ s and not stored in the tree. The rest of the root node contains metadata about the tree and finally a hash->block map to find nodes that are lower in the htree. If ``dx_root.info.indirect_levels`` is non-zero then the htree has two levels; the data block pointed to by the root node's map is an interior node, which is indexed by a minor hash. Interior nodes in this tree contain a zeroed-out ``struct ext4_dir_entry_2`` followed by a minor_hash->block map to find leaf nodes. Leaf nodes contain a linear array of all ``struct ext4_dir_entry_2``; all of these entries (presumably) hash to the same value. If there is an overflow, the entries simply overflow into the next leaf node, and the least-significant bit of the hash (in the interior node map) that gets us to this next leaf node is set.

To traverse the directory as an htree, the code calculates the hash of the desired file name and uses it to find the corresponding block number. If the tree is flat, the block is a linear array of directory entries that can be searched; otherwise, the minor hash of the file name is computed and used against this second block to find the corresponding third block number. That third block number will be a linear array of directory entries.
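To make that narrowing step concrete, here is a small self-contained C sketch; the ``dx_map`` type, the helper name and the sample numbers are invented for illustration and are not ext4 code::

   #include <stdint.h>
   #include <stdio.h>

   struct dx_map { uint32_t hash; uint32_t block; };  /* shaped like struct dx_entry */

   /* Pick the map entry with the largest hash that is <= the lookup hash;
    * the map is sorted by hash in ascending order. */
   static uint32_t dx_find_block(const struct dx_map *map, int count, uint32_t hash)
   {
           int i, best = 0;

           for (i = 1; i < count; i++)
                   if (map[i].hash <= hash)
                           best = i;
           return map[best].block;
   }

   int main(void)
   {
           /* Root map: hash ranges -> blocks (invented example values). */
           const struct dx_map root[] = { {0, 1}, {0x40000000, 2}, {0x80000000, 3} };
           uint32_t name_hash = 0x5a5a5a5a;    /* stand-in for the file name's hash */

           printf("leaf block %u\n", dx_find_block(root, 3, name_hash));
           return 0;
   }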
To traverse the directory as a linear array (such as the old code does), the code simply reads every data block in the directory. The blocks used for the htree will appear to have no entries (aside from '.' and '..') and so only the leaf nodes will appear to have any interesting content.

The root of the htree is in ``struct dx_root``, which is the full length of a data block:

======  ===============  ====================================  =======================================================================
Offset  Type             Name                                  Description
======  ===============  ====================================  =======================================================================
0x0     __le32           dot.inode                             inode number of this directory.
0x4     __le16           dot.rec_len                           Length of this record, 12.
0x6     u8               dot.name_len                          Length of the name, 1.
0x7     u8               dot.file_type                         File type of this entry, 0x2 (directory) (if the feature flag is set).
0x8     char             dot.name[4]                           “.\\0\\0\\0”
0xC     __le32           dotdot.inode                          inode number of parent directory.
0x10    __le16           dotdot.rec_len                        block_size - 12. The record length is long enough to cover all htree data.
0x12    u8               dotdot.name_len                       Length of the name, 2.
0x13    u8               dotdot.file_type                      File type of this entry, 0x2 (directory) (if the feature flag is set).
0x14    char             dotdot_name[4]                        “..\\0\\0”
0x18    __le32           struct dx_root_info.reserved_zero     Zero.
0x1C    u8               struct dx_root_info.hash_version      Hash type, see dirhash_ table below.
0x1D    u8               struct dx_root_info.info_length       Length of the tree information, 0x8.
0x1E    u8               struct dx_root_info.indirect_levels   Depth of the htree. Cannot be larger than 3 if the INCOMPAT_LARGEDIR feature is set; cannot be larger than 2 otherwise.
0x1F    u8               struct dx_root_info.unused_flags
0x20    __le16           limit                                 Maximum number of dx_entries that can follow this header, plus 1 for the header itself.
0x22    __le16           count                                 Actual number of dx_entries that follow this header, plus 1 for the header itself.
0x24    __le32           block                                 The block number (within the directory file) that goes with hash=0.
0x28    struct dx_entry  entries[0]                            As many 8-byte ``struct dx_entry`` as fits in the rest of the data block.
======  ===============  ====================================  =======================================================================
|
||||
|
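Read back as C, the layout above corresponds to roughly the following
declaration. This is an illustrative mirror of the table, not the
kernel's definition (fs/ext4/namei.c builds ``struct dx_root`` out of
smaller helper structs); on-disk fields are little-endian, and on common
ABIs the natural alignment below introduces no padding::

   #include <stdint.h>

   struct dx_fake_dirent {                 /* 8 bytes, looks unused */
           uint32_t inode;
           uint16_t rec_len;
           uint8_t  name_len;
           uint8_t  file_type;
   };

   struct dx_root_disk {                   /* byte offset */
           struct dx_fake_dirent dot;      /* 0x00 */
           char dot_name[4];               /* 0x08  ".\0\0\0" */
           struct dx_fake_dirent dotdot;   /* 0x0C */
           char dotdot_name[4];            /* 0x14  "..\0\0" */
           uint32_t reserved_zero;         /* 0x18 */
           uint8_t  hash_version;          /* 0x1C */
           uint8_t  info_length;           /* 0x1D, 0x8 */
           uint8_t  indirect_levels;       /* 0x1E */
           uint8_t  unused_flags;          /* 0x1F */
           uint16_t limit;                 /* 0x20 */
           uint16_t count;                 /* 0x22 */
           uint32_t block;                 /* 0x24, block for hash=0 */
           /* struct dx_entry entries[] follow at 0x28 */
   };
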
.. _dirhash:

The directory hash is one of the following values:

.. list-table::
   :widths: 1 79
   :header-rows: 1

   * - Value
     - Description
   * - 0x0
     - Legacy.
   * - 0x1
     - Half MD4.
   * - 0x2
     - Tea.
   * - 0x3
     - Legacy, unsigned.
   * - 0x4
     - Half MD4, unsigned.
   * - 0x5
     - Tea, unsigned.
   * - 0x6
     - Siphash.

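A hedged sketch of the corresponding constants (newer kernels carry
equivalent ``DX_HASH_*`` definitions in fs/ext4/ext4.h; the enum name
here is illustrative only)::

   /* Values stored in dx_root_info.hash_version. */
   enum dx_hash_version {
           DX_HASH_LEGACY             = 0x0,
           DX_HASH_HALF_MD4           = 0x1,
           DX_HASH_TEA                = 0x2,
           DX_HASH_LEGACY_UNSIGNED    = 0x3,
           DX_HASH_HALF_MD4_UNSIGNED  = 0x4,
           DX_HASH_TEA_UNSIGNED       = 0x5,
           DX_HASH_SIPHASH            = 0x6,
   };
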
Interior nodes of an htree are recorded as ``struct dx_node``, which is
also the full length of a data block:

.. list-table::
   :widths: 1 1 1 77
   :header-rows: 1

   * - Offset
     - Type
     - Name
     - Description
   * - 0x0
     - \_\_le32
     - fake.inode
     - Zero, to make it look like this entry is not in use.
   * - 0x4
     - \_\_le16
     - fake.rec\_len
     - The size of the block, in order to hide all of the dx\_node data.
   * - 0x6
     - u8
     - name\_len
     - Zero. There is no name for this “unused” directory entry.
   * - 0x7
     - u8
     - file\_type
     - Zero. There is no file type for this “unused” directory entry.
   * - 0x8
     - \_\_le16
     - limit
     - Maximum number of dx\_entries that can follow this header, plus 1 for
       the header itself.
   * - 0xA
     - \_\_le16
     - count
     - Actual number of dx\_entries that follow this header, plus 1 for the
       header itself.
   * - 0xE
     - \_\_le32
     - block
     - The block number (within the directory file) that goes with the lowest
       hash value of this block. This value is stored in the parent block.
   * - 0x12
     - struct dx\_entry
     - entries[0]
     - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block.

The hash maps that exist in both ``struct dx_root`` and
``struct dx_node`` are recorded as ``struct dx_entry``, which is 8 bytes
long:

.. list-table::
   :widths: 1 1 1 77
   :header-rows: 1

   * - Offset
     - Type
     - Name
     - Description
   * - 0x0
     - \_\_le32
     - hash
     - Hash code.
   * - 0x4
     - \_\_le32
     - block
     - Block number (within the directory file, not filesystem blocks) of the
       next node in the htree.

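Since each map is sorted by ascending ``hash``, a lookup can
binary-search it. A minimal sketch under the same assumptions as the
earlier example (the kernel's equivalent search lives in ``dx_probe()``
in fs/ext4/namei.c); ``count`` is the header's count field, which
includes the header itself::

   #include <stdint.h>

   struct dx_entry {
           uint32_t hash;
           uint32_t block;
   };

   /* 'hash0_block' is the header's block field (covers hash 0). */
   static uint32_t dx_search(uint32_t hash0_block,
                             const struct dx_entry *entries,
                             unsigned int count, uint32_t hash)
   {
           unsigned int nr = count - 1;    /* entries after the header */
           unsigned int low = 0, high;
           uint32_t best = hash0_block;

           if (nr == 0)
                   return best;
           high = nr - 1;
           while (low <= high) {
                   unsigned int mid = low + (high - low) / 2;

                   if (entries[mid].hash <= hash) {
                           best = entries[mid].block;
                           low = mid + 1;
                   } else {
                           if (mid == 0)
                                   break;
                           high = mid - 1;
                   }
           }
           return best;
   }
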
(If you think this is all quite clever and peculiar, so does the
author.)

If metadata checksums are enabled, the last 8 bytes of the directory
block (precisely the length of one dx\_entry) are used to store a
``struct dx_tail``, which contains the checksum. The ``limit`` and
``count`` entries in the dx\_root/dx\_node structures are adjusted as
necessary to fit the dx\_tail into the block. If there is no space for
the dx\_tail, the user is notified to run e2fsck -D to rebuild the
directory index (which will ensure that there's space for the checksum).
The dx\_tail structure is 8 bytes long and looks like this:

.. list-table::
   :widths: 1 1 1 77
   :header-rows: 1

   * - Offset
     - Type
     - Name
     - Description
   * - 0x0
     - u32
     - dt\_reserved
     - Zero.
   * - 0x4
     - \_\_le32
     - dt\_checksum
     - Checksum of the htree directory block.

The checksum is calculated against the FS UUID, the htree index header
(dx\_root or dx\_node), all of the htree indices (dx\_entry) that are in
use, and the tail block (dx\_tail).

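As a small, illustrative sketch of where that tail lives (the kernel
computes and verifies the value in its htree checksum helpers in
fs/ext4/namei.c; the helper name below is made up)::

   #include <stdint.h>
   #include <stddef.h>

   struct dx_tail {
           uint32_t dt_reserved;   /* zero */
           uint32_t dt_checksum;   /* checksum of the htree block */
   };

   /* The tail occupies the last 8 bytes of the directory block. */
   static struct dx_tail *dx_get_tail(void *block, size_t blocksize)
   {
           return (struct dx_tail *)((char *)block + blocksize
                                     - sizeof(struct dx_tail));
   }

The ``limit`` adjustment mentioned above then simply keeps live
dx\_entries from ever overlapping this slot.
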
@@ -808,9 +808,40 @@ The kernel command line parameter printk.devkmsg= overrides this and is
a one-time setting until next reboot: once set, it cannot be changed by
this sysctl interface anymore.

==============================================================
pty
===

randomize_va_space:
See Documentation/filesystems/devpts.rst.


random
======

This is a directory, with the following entries:

* ``boot_id``: a UUID generated the first time this is retrieved, and
  unvarying after that;

* ``uuid``: a UUID generated every time this is retrieved (this can
  thus be used to generate UUIDs at will);

* ``entropy_avail``: the pool's entropy count, in bits;

* ``poolsize``: the entropy pool size, in bits;

* ``urandom_min_reseed_secs``: obsolete (used to determine the minimum
  number of seconds between urandom pool reseeding). This file is
  writable for compatibility purposes, but writing to it has no effect
  on any RNG behavior;

* ``write_wakeup_threshold``: when the entropy count drops below this
  (as a number of bits), processes waiting to write to ``/dev/random``
  are woken up. This file is writable for compatibility purposes, but
  writing to it has no effect on any RNG behavior.

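A small userspace illustration of the read-only entries above
(illustrative only; repeated reads show ``uuid`` changing on every read
while ``boot_id`` stays fixed)::

   #include <stdio.h>

   static void dump(const char *path)
   {
           char buf[64];
           FILE *f = fopen(path, "r");

           if (!f) {
                   perror(path);
                   return;
           }
           if (fgets(buf, sizeof(buf), f))
                   printf("%s: %s", path, buf);  /* value ends in '\n' */
           fclose(f);
   }

   int main(void)
   {
           dump("/proc/sys/kernel/random/boot_id");
           dump("/proc/sys/kernel/random/uuid");
           dump("/proc/sys/kernel/random/uuid");  /* differs from previous */
           dump("/proc/sys/kernel/random/entropy_avail");
           dump("/proc/sys/kernel/random/poolsize");
           return 0;
   }
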
randomize_va_space
==================

This option can be used to select the type of process address
space randomization that is used in the system, for architectures

@@ -11349,6 +11349,7 @@ F: drivers/block/brd.c

RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: drivers/char/random.c

Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 282
SUBLEVEL = 285
EXTRAVERSION =
NAME = Petit Gorille

@@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
return ret;
}
#define get_cycles get_cycles

#endif

@@ -48,18 +48,17 @@
"GPIO18",
"NC", /* GPIO19 */
"NC", /* GPIO20 */
"GPIO21",
"CAM_GPIO0",
"GPIO22",
"GPIO23",
"GPIO24",
"GPIO25",
"NC", /* GPIO26 */
"CAM_GPIO0",
/* Binary number representing build/revision */
"CONFIG0",
"CONFIG1",
"CONFIG2",
"CONFIG3",
"GPIO27",
"GPIO28",
"GPIO29",
"GPIO30",
"GPIO31",
"NC", /* GPIO32 */
"NC", /* GPIO33 */
"NC", /* GPIO34 */

@@ -77,16 +77,18 @@
"GPIO27",
"SDA0",
"SCL0",
"NC", /* GPIO30 */
"NC", /* GPIO31 */
"NC", /* GPIO32 */
"NC", /* GPIO33 */
"NC", /* GPIO34 */
"NC", /* GPIO35 */
"NC", /* GPIO36 */
"NC", /* GPIO37 */
"NC", /* GPIO38 */
"NC", /* GPIO39 */
/* Used by BT module */
"CTS0",
"RTS0",
"TXD0",
"RXD0",
/* Used by Wifi */
"SD1_CLK",
"SD1_CMD",
"SD1_DATA0",
"SD1_DATA1",
"SD1_DATA2",
"SD1_DATA3",
"CAM_GPIO1", /* GPIO40 */
"WL_ON", /* GPIO41 */
"NC", /* GPIO42 */

@@ -128,7 +128,7 @@
samsung,i2c-max-bus-freq = <20000>;

eeprom@50 {
compatible = "samsung,s524ad0xd1";
compatible = "samsung,s524ad0xd1", "atmel,24c128";
reg = <0x50>;
};

@@ -287,7 +287,7 @@
samsung,i2c-max-bus-freq = <20000>;

eeprom@51 {
compatible = "samsung,s524ad0xd1";
compatible = "samsung,s524ad0xd1", "atmel,24c128";
reg = <0x51>;
};


@@ -286,7 +286,7 @@
clocks = <&armclk>;
};

gic: gic@1000 {
gic: interrupt-controller@1000 {
compatible = "arm,arm11mp-gic";
interrupt-controller;
#interrupt-cells = <3>;

@@ -14,5 +14,6 @@

typedef unsigned long cycles_t;
#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())

#endif

@ -70,14 +70,17 @@ static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus)
|
||||
}
|
||||
ctrl_base = of_iomap(np, 0);
|
||||
if (!ctrl_base) {
|
||||
of_node_put(np);
|
||||
pr_err("failed to map address\n");
|
||||
return;
|
||||
}
|
||||
if (of_property_read_u32(np, "smp-offset", &offset) < 0) {
|
||||
of_node_put(np);
|
||||
pr_err("failed to find smp-offset property\n");
|
||||
return;
|
||||
}
|
||||
ctrl_base += offset;
|
||||
of_node_put(np);
|
||||
}
|
||||
}
|
||||
|
||||
@ -163,6 +166,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
if (WARN_ON(!node))
|
||||
return -1;
|
||||
ctrl_base = of_iomap(node, 0);
|
||||
of_node_put(node);
|
||||
|
||||
/* set the secondary core boot from DDR */
|
||||
remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL);
|
||||
|
@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(clockfw_lock);
|
||||
unsigned long omap1_uart_recalc(struct clk *clk)
|
||||
{
|
||||
unsigned int val = __raw_readl(clk->enable_reg);
|
||||
return val & clk->enable_bit ? 48000000 : 12000000;
|
||||
return val & 1 << clk->enable_bit ? 48000000 : 12000000;
|
||||
}
|
||||
|
||||
unsigned long omap1_sossi_recalc(struct clk *clk)
|
||||
|
@ -146,6 +146,7 @@ static int __init dcscb_init(void)
|
||||
if (!node)
|
||||
return -ENODEV;
|
||||
dcscb_base = of_iomap(node, 0);
|
||||
of_node_put(node);
|
||||
if (!dcscb_base)
|
||||
return -EADDRNOTAVAIL;
|
||||
cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
|
||||
|
@ -181,7 +181,7 @@
|
||||
clocks {
|
||||
sleep_clk: sleep_clk {
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <32000>;
|
||||
clock-frequency = <32768>;
|
||||
#clock-cells = <0>;
|
||||
};
|
||||
|
||||
|
@ -72,7 +72,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
{
|
||||
unsigned long pc = rec->ip;
|
||||
u32 old, new;
|
||||
long offset = (long)pc - (long)addr;
|
||||
long offset = (long)addr - (long)pc;
|
||||
|
||||
if (offset < -SZ_128M || offset >= SZ_128M) {
|
||||
#ifdef CONFIG_ARM64_MODULE_PLTS
|
||||
@ -151,7 +151,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
|
||||
unsigned long pc = rec->ip;
|
||||
bool validate = true;
|
||||
u32 old = 0, new;
|
||||
long offset = (long)pc - (long)addr;
|
||||
long offset = (long)addr - (long)pc;
|
||||
|
||||
if (offset < -SZ_128M || offset >= SZ_128M) {
|
||||
#ifdef CONFIG_ARM64_MODULE_PLTS
|
||||
|
@ -39,6 +39,7 @@ get_cycles (void)
|
||||
ret = ia64_getreg(_IA64_REG_AR_ITC);
|
||||
return ret;
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
extern void ia64_cpu_local_tick (void);
|
||||
extern unsigned long long ia64_native_sched_clock (void);
|
||||
|
@ -308,7 +308,7 @@ comment "Processor Specific Options"
|
||||
|
||||
config M68KFPU_EMU
|
||||
bool "Math emulation support"
|
||||
depends on MMU
|
||||
depends on M68KCLASSIC && FPU
|
||||
help
|
||||
At some point in the future, this will cause floating-point math
|
||||
instructions to be emulated by the kernel on machines that lack a
|
||||
|
@ -309,6 +309,7 @@ comment "Machine Options"
|
||||
|
||||
config UBOOT
|
||||
bool "Support for U-Boot command line parameters"
|
||||
depends on COLDFIRE
|
||||
help
|
||||
If you say Y here kernel will try to collect command
|
||||
line parameters from the initial u-boot stack.
|
||||
|
@ -42,7 +42,8 @@ extern void paging_init(void);
|
||||
* ZERO_PAGE is a global shared page that is always zero: used
|
||||
* for zero-mapped memory areas etc..
|
||||
*/
|
||||
#define ZERO_PAGE(vaddr) (virt_to_page(0))
|
||||
extern void *empty_zero_page;
|
||||
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
|
||||
|
||||
/*
|
||||
* No page table caches to initialise.
|
||||
|
@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void)
|
||||
{
|
||||
if (mach_random_get_entropy)
|
||||
return mach_random_get_entropy();
|
||||
return 0;
|
||||
return random_get_entropy_fallback();
|
||||
}
|
||||
#define random_get_entropy random_get_entropy
|
||||
|
||||
|
@ -28,7 +28,6 @@
|
||||
#define cpu_has_6k_cache 0
|
||||
#define cpu_has_8k_cache 0
|
||||
#define cpu_has_tx39_cache 0
|
||||
#define cpu_has_fpu 1
|
||||
#define cpu_has_nofpuex 0
|
||||
#define cpu_has_32fpr 1
|
||||
#define cpu_has_counter 1
|
||||
|
@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void)
|
||||
else
|
||||
return 0; /* no usable counter */
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
/*
|
||||
* Like get_cycles - but where c0_count is not available we desperately
|
||||
* use c0_random in an attempt to get at least a little bit of entropy.
|
||||
*
|
||||
* R6000 and R6000A neither have a count register nor a random register.
|
||||
* That leaves no entropy source in the CPU itself.
|
||||
*/
|
||||
static inline unsigned long random_get_entropy(void)
|
||||
{
|
||||
unsigned int prid = read_c0_prid();
|
||||
unsigned int imp = prid & PRID_IMP_MASK;
|
||||
unsigned int c0_random;
|
||||
|
||||
if (can_use_mips_counter(prid))
|
||||
if (can_use_mips_counter(read_c0_prid()))
|
||||
return read_c0_count();
|
||||
else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
|
||||
return read_c0_random();
|
||||
|
||||
if (cpu_has_3kex)
|
||||
c0_random = (read_c0_random() >> 8) & 0x3f;
|
||||
else
|
||||
return 0; /* no usable register */
|
||||
c0_random = read_c0_random() & 0x3f;
|
||||
return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
|
||||
}
|
||||
#define random_get_entropy random_get_entropy
|
||||
|
||||
|
@ -31,6 +31,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void)
|
||||
cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
|
||||
if (cpc_node) {
|
||||
err = of_address_to_resource(cpc_node, 0, &res);
|
||||
of_node_put(cpc_node);
|
||||
if (!err)
|
||||
return res.start;
|
||||
}
|
||||
|
@ -20,5 +20,8 @@
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
extern cycles_t get_cycles(void);
|
||||
#define get_cycles get_cycles
|
||||
|
||||
#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
|
||||
|
||||
#endif
|
||||
|
@ -27,6 +27,7 @@ static inline cycles_t get_cycles(void)
|
||||
{
|
||||
return mfspr(SPR_TTCR);
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
/* This isn't really used any more */
|
||||
#define CLOCK_TICK_RATE 1000
|
||||
|
@ -459,6 +459,15 @@ _start:
|
||||
l.ori r3,r0,0x1
|
||||
l.mtspr r0,r3,SPR_SR
|
||||
|
||||
/*
|
||||
* Start the TTCR as early as possible, so that the RNG can make use of
|
||||
* measurements of boot time from the earliest opportunity. Especially
|
||||
* important is that the TTCR does not return zero by the time we reach
|
||||
* rand_initialize().
|
||||
*/
|
||||
l.movhi r3,hi(SPR_TTMR_CR)
|
||||
l.mtspr r0,r3,SPR_TTMR
|
||||
|
||||
CLEAR_GPR(r1)
|
||||
CLEAR_GPR(r2)
|
||||
CLEAR_GPR(r3)
|
||||
|
@ -12,9 +12,10 @@
|
||||
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
static inline cycles_t get_cycles (void)
|
||||
static inline cycles_t get_cycles(void)
|
||||
{
|
||||
return mfctl(16);
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
#endif
|
||||
|
@ -6,27 +6,28 @@
|
||||
|
||||
#include <asm/machdep.h>
|
||||
|
||||
static inline int arch_get_random_long(unsigned long *v)
|
||||
static inline bool arch_get_random_long(unsigned long *v)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int arch_get_random_int(unsigned int *v)
|
||||
static inline bool arch_get_random_int(unsigned int *v)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int arch_get_random_seed_long(unsigned long *v)
|
||||
static inline bool arch_get_random_seed_long(unsigned long *v)
|
||||
{
|
||||
if (ppc_md.get_random_seed)
|
||||
return ppc_md.get_random_seed(v);
|
||||
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
static inline int arch_get_random_seed_int(unsigned int *v)
|
||||
|
||||
static inline bool arch_get_random_seed_int(unsigned int *v)
|
||||
{
|
||||
unsigned long val;
|
||||
int rc;
|
||||
bool rc;
|
||||
|
||||
rc = arch_get_random_seed_long(&val);
|
||||
if (rc)
|
||||
@ -34,16 +35,6 @@ static inline int arch_get_random_seed_int(unsigned int *v)
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static inline int arch_has_random(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int arch_has_random_seed(void)
|
||||
{
|
||||
return !!ppc_md.get_random_seed;
|
||||
}
|
||||
#endif /* CONFIG_ARCH_RANDOM */
|
||||
|
||||
#ifdef CONFIG_PPC_POWERNV
|
||||
|
@ -50,6 +50,7 @@ static inline cycles_t get_cycles(void)
|
||||
return ret;
|
||||
#endif
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _ASM_POWERPC_TIMEX_H */
|
||||
|
@ -41,7 +41,7 @@ static int __init powersave_off(char *arg)
|
||||
{
|
||||
ppc_md.power_save = NULL;
|
||||
cpuidle_disable = IDLE_POWERSAVE_OFF;
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
__setup("powersave=off", powersave_off);
|
||||
|
||||
|
@ -2920,8 +2920,13 @@ long arch_ptrace(struct task_struct *child, long request,
|
||||
|
||||
flush_fp_to_thread(child);
|
||||
if (fpidx < (PT_FPSCR - PT_FPR0))
|
||||
if (IS_ENABLED(CONFIG_PPC32)) {
|
||||
// On 32-bit the index we are passed refers to 32-bit words
|
||||
tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx];
|
||||
} else {
|
||||
memcpy(&tmp, &child->thread.TS_FPR(fpidx),
|
||||
sizeof(long));
|
||||
}
|
||||
else
|
||||
tmp = child->thread.fp_state.fpscr;
|
||||
}
|
||||
@ -2953,8 +2958,13 @@ long arch_ptrace(struct task_struct *child, long request,
|
||||
|
||||
flush_fp_to_thread(child);
|
||||
if (fpidx < (PT_FPSCR - PT_FPR0))
|
||||
if (IS_ENABLED(CONFIG_PPC32)) {
|
||||
// On 32-bit the index we are passed refers to 32-bit words
|
||||
((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
|
||||
} else {
|
||||
memcpy(&child->thread.TS_FPR(fpidx), &data,
|
||||
sizeof(long));
|
||||
}
|
||||
else
|
||||
child->thread.fp_state.fpscr = data;
|
||||
ret = 0;
|
||||
|
@ -324,7 +324,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
|
||||
if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
|
||||
mask |= CNST_THRESH_MASK;
|
||||
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
|
||||
}
|
||||
} else if (event_is_threshold(event))
|
||||
return -1;
|
||||
} else {
|
||||
/*
|
||||
* Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
|
||||
|
@ -341,6 +341,6 @@ late_initcall(cpm_init);
|
||||
static int __init cpm_powersave_off(char *arg)
|
||||
{
|
||||
cpm.powersave_off = 1;
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
__setup("powersave=off", cpm_powersave_off);
|
||||
|
@ -291,6 +291,7 @@ cpm_setbrg(uint brg, uint rate)
|
||||
out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
|
||||
CPM_BRG_EN | CPM_BRG_DIV16);
|
||||
}
|
||||
EXPORT_SYMBOL(cpm_setbrg);
|
||||
|
||||
struct cpm_ioport16 {
|
||||
__be16 dir, par, odr_sor, dat, intr;
|
||||
|
@ -509,8 +509,10 @@ int fsl_rio_setup(struct platform_device *dev)
|
||||
if (rc) {
|
||||
dev_err(&dev->dev, "Can't get %pOF property 'reg'\n",
|
||||
rmu_node);
|
||||
of_node_put(rmu_node);
|
||||
goto err_rmu;
|
||||
}
|
||||
of_node_put(rmu_node);
|
||||
rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
|
||||
if (!rmu_regs_win) {
|
||||
dev_err(&dev->dev, "Unable to map rmu register window\n");
|
||||
|
@ -199,6 +199,7 @@ int icp_opal_init(void)
|
||||
|
||||
printk("XICS: Using OPAL ICP fallbacks\n");
|
||||
|
||||
of_node_put(np);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -26,18 +26,6 @@ static void s390_arch_random_generate(u8 *buf, unsigned int nbytes)
|
||||
atomic64_add(nbytes, &s390_arch_random_counter);
|
||||
}
|
||||
|
||||
static inline bool arch_has_random(void)
|
||||
{
|
||||
if (static_branch_likely(&s390_arch_random_available))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool arch_has_random_seed(void)
|
||||
{
|
||||
return arch_has_random();
|
||||
}
|
||||
|
||||
static inline bool arch_get_random_long(unsigned long *v)
|
||||
{
|
||||
if (static_branch_likely(&s390_arch_random_available)) {
|
||||
|
@ -50,9 +50,16 @@ static inline bool test_preempt_need_resched(void)
|
||||
|
||||
static inline void __preempt_count_add(int val)
|
||||
{
|
||||
if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
|
||||
/*
|
||||
* With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
|
||||
* enabled, gcc 12 fails to handle __builtin_constant_p().
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
|
||||
if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
|
||||
__atomic_add_const(val, &S390_lowcore.preempt_count);
|
||||
else
|
||||
return;
|
||||
}
|
||||
}
|
||||
__atomic_add(val, &S390_lowcore.preempt_count);
|
||||
}
|
||||
|
||||
|
@ -177,6 +177,7 @@ static inline cycles_t get_cycles(void)
|
||||
{
|
||||
return (cycles_t) get_tod_clock() >> 2;
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
int get_phys_clock(unsigned long *clock);
|
||||
void init_cpu_timer(void);
|
||||
|
@ -684,7 +684,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
|
||||
pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
|
||||
ptev = pte_val(*ptep);
|
||||
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
|
||||
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
|
||||
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
|
||||
pgste_set_unlock(ptep, pgste);
|
||||
preempt_enable();
|
||||
}
|
||||
|
@ -9,8 +9,6 @@
|
||||
|
||||
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
|
||||
|
||||
/* XXX Maybe do something better at some point... -DaveM */
|
||||
typedef unsigned long cycles_t;
|
||||
#define get_cycles() (0)
|
||||
#include <asm-generic/timex.h>
|
||||
|
||||
#endif
|
||||
|
@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
|
||||
unsigned long *stack_out)
|
||||
{
|
||||
struct winch_data data;
|
||||
int fds[2], n, err;
|
||||
int fds[2], n, err, pid;
|
||||
char c;
|
||||
|
||||
err = os_pipe(fds, 1, 1);
|
||||
@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
|
||||
* problem with /dev/net/tun, which if held open by this
|
||||
* thread, prevents the TUN/TAP device from being reused.
|
||||
*/
|
||||
err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
|
||||
if (err < 0) {
|
||||
pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
|
||||
if (pid < 0) {
|
||||
err = pid;
|
||||
printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
|
||||
-err);
|
||||
goto out_close;
|
||||
@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
|
||||
goto out_close;
|
||||
}
|
||||
|
||||
return err;
|
||||
return pid;
|
||||
|
||||
out_close:
|
||||
close(fds[1]);
|
||||
|
@ -2,13 +2,8 @@
|
||||
#ifndef __UM_TIMEX_H
|
||||
#define __UM_TIMEX_H
|
||||
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
static inline cycles_t get_cycles (void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define CLOCK_TICK_RATE (HZ)
|
||||
|
||||
#include <asm-generic/timex.h>
|
||||
|
||||
#endif
|
||||
|
@ -328,7 +328,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
||||
static __init int vdso_setup(char *s)
|
||||
{
|
||||
vdso64_enabled = simple_strtoul(s, NULL, 0);
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
__setup("vdso=", vdso_setup);
|
||||
#endif
|
||||
|
@ -16,7 +16,19 @@
|
||||
|
||||
/* Asm macros */
|
||||
|
||||
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
|
||||
/*
|
||||
* ACPI_FLUSH_CPU_CACHE() flushes caches on entering sleep states.
|
||||
* It is required to prevent data loss.
|
||||
*
|
||||
* While running inside virtual machine, the kernel can bypass cache flushing.
|
||||
* Changing sleep state in a virtual machine doesn't affect the host system
|
||||
* sleep state and cannot lead to data loss.
|
||||
*/
|
||||
#define ACPI_FLUSH_CPU_CACHE() \
|
||||
do { \
|
||||
if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) \
|
||||
wbinvd(); \
|
||||
} while (0)
|
||||
|
||||
int __acpi_acquire_global_lock(unsigned int *lock);
|
||||
int __acpi_release_global_lock(unsigned int *lock);
|
||||
|
@ -86,10 +86,6 @@ static inline bool rdseed_int(unsigned int *v)
|
||||
return ok;
|
||||
}
|
||||
|
||||
/* Conditional execution based on CPU type */
|
||||
#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
|
||||
#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
|
||||
|
||||
/*
|
||||
* These are the generic interfaces; they must not be declared if the
|
||||
* stubs in <linux/random.h> are to be invoked,
|
||||
@ -99,22 +95,22 @@ static inline bool rdseed_int(unsigned int *v)
|
||||
|
||||
static inline bool arch_get_random_long(unsigned long *v)
|
||||
{
|
||||
return arch_has_random() ? rdrand_long(v) : false;
|
||||
return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_long(v) : false;
|
||||
}
|
||||
|
||||
static inline bool arch_get_random_int(unsigned int *v)
|
||||
{
|
||||
return arch_has_random() ? rdrand_int(v) : false;
|
||||
return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_int(v) : false;
|
||||
}
|
||||
|
||||
static inline bool arch_get_random_seed_long(unsigned long *v)
|
||||
{
|
||||
return arch_has_random_seed() ? rdseed_long(v) : false;
|
||||
return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_long(v) : false;
|
||||
}
|
||||
|
||||
static inline bool arch_get_random_seed_int(unsigned int *v)
|
||||
{
|
||||
return arch_has_random_seed() ? rdseed_int(v) : false;
|
||||
return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_int(v) : false;
|
||||
}
|
||||
|
||||
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
|
||||
|
@ -393,5 +393,6 @@
|
||||
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
|
||||
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
|
||||
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
|
||||
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
|
||||
|
||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
||||
|
@ -10,6 +10,10 @@
|
||||
*
|
||||
* Things ending in "2" are usually because we have no better
|
||||
* name for them. There's no processor called "SILVERMONT2".
|
||||
*
|
||||
* While adding a new CPUID for a new microarchitecture, add a new
|
||||
* group to keep logically sorted out in chronological order. Within
|
||||
* that group keep the CPUID for the variants sorted by model number.
|
||||
*/
|
||||
|
||||
#define INTEL_FAM6_CORE_YONAH 0x0E
|
||||
@ -49,6 +53,24 @@
|
||||
#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
|
||||
#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
|
||||
|
||||
#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
|
||||
|
||||
#define INTEL_FAM6_ICELAKE_X 0x6A
|
||||
#define INTEL_FAM6_ICELAKE_XEON_D 0x6C
|
||||
#define INTEL_FAM6_ICELAKE_DESKTOP 0x7D
|
||||
#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
|
||||
|
||||
#define INTEL_FAM6_COMETLAKE 0xA5
|
||||
#define INTEL_FAM6_COMETLAKE_L 0xA6
|
||||
|
||||
#define INTEL_FAM6_ROCKETLAKE 0xA7
|
||||
|
||||
/* Hybrid Core/Atom Processors */
|
||||
|
||||
#define INTEL_FAM6_LAKEFIELD 0x8A
|
||||
#define INTEL_FAM6_ALDERLAKE 0x97
|
||||
#define INTEL_FAM6_ALDERLAKE_L 0x9A
|
||||
|
||||
/* "Small Core" Processors (Atom) */
|
||||
|
||||
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
|
||||
@ -68,7 +90,10 @@
|
||||
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
|
||||
#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
|
||||
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
|
||||
|
||||
#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
|
||||
#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */
|
||||
#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */
|
||||
|
||||
/* Xeon Phi */
|
||||
|
||||
|
@ -96,6 +96,30 @@
|
||||
* Not susceptible to
|
||||
* TSX Async Abort (TAA) vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /*
|
||||
* Not susceptible to SBDR and SSDP
|
||||
* variants of Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_FBSDP_NO BIT(14) /*
|
||||
* Not susceptible to FBSDP variant of
|
||||
* Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_PSDP_NO BIT(15) /*
|
||||
* Not susceptible to PSDP variant of
|
||||
* Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_FB_CLEAR BIT(17) /*
|
||||
* VERW clears CPU fill buffer
|
||||
* even on MDS_NO CPUs.
|
||||
*/
|
||||
#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /*
|
||||
* MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]
|
||||
* bit available to control VERW
|
||||
* behavior.
|
||||
*/
|
||||
|
||||
#define MSR_IA32_FLUSH_CMD 0x0000010b
|
||||
#define L1D_FLUSH BIT(0) /*
|
||||
@ -113,6 +137,7 @@
|
||||
/* SRBDS support */
|
||||
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
|
||||
#define RNGDS_MITG_DIS BIT(0)
|
||||
#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
|
||||
|
||||
#define MSR_IA32_SYSENTER_CS 0x00000174
|
||||
#define MSR_IA32_SYSENTER_ESP 0x00000175
|
||||
|
@ -323,6 +323,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
|
||||
DECLARE_STATIC_KEY_FALSE(mds_user_clear);
|
||||
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
|
||||
|
||||
#include <asm/segment.h>
|
||||
|
||||
/**
|
||||
|
@ -21,7 +21,6 @@ struct saved_context {
|
||||
#endif
|
||||
unsigned long cr0, cr2, cr3, cr4;
|
||||
u64 misc_enable;
|
||||
bool misc_enable_saved;
|
||||
struct saved_msrs saved_msrs;
|
||||
struct desc_ptr gdt_desc;
|
||||
struct desc_ptr idt;
|
||||
@ -30,6 +29,7 @@ struct saved_context {
|
||||
unsigned long tr;
|
||||
unsigned long safety;
|
||||
unsigned long return_address;
|
||||
bool misc_enable_saved;
|
||||
} __attribute__((packed));
|
||||
|
||||
#endif /* _ASM_X86_SUSPEND_32_H */
|
||||
|
@ -14,9 +14,13 @@
|
||||
* Image of the saved processor state, used by the low level ACPI suspend to
|
||||
* RAM code and by the low level hibernation code.
|
||||
*
|
||||
* If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that
|
||||
* __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c,
|
||||
* still work as required.
|
||||
* If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S
|
||||
* and make sure that __save/__restore_processor_state(), defined in
|
||||
* arch/x86/power/cpu.c, still work as required.
|
||||
*
|
||||
* Because the structure is packed, make sure to avoid unaligned members. For
|
||||
* optimisation purposes but also because tools like kmemleak only search for
|
||||
* pointers that are aligned.
|
||||
*/
|
||||
struct saved_context {
|
||||
struct pt_regs regs;
|
||||
@ -36,7 +40,6 @@ struct saved_context {
|
||||
|
||||
unsigned long cr0, cr2, cr3, cr4, cr8;
|
||||
u64 misc_enable;
|
||||
bool misc_enable_saved;
|
||||
struct saved_msrs saved_msrs;
|
||||
unsigned long efer;
|
||||
u16 gdt_pad; /* Unused */
|
||||
@ -48,6 +51,7 @@ struct saved_context {
|
||||
unsigned long tr;
|
||||
unsigned long safety;
|
||||
unsigned long return_address;
|
||||
bool misc_enable_saved;
|
||||
} __attribute__((packed));
|
||||
|
||||
#define loaddebug(thread,register) \
|
||||
|
@ -5,6 +5,15 @@
|
||||
#include <asm/processor.h>
|
||||
#include <asm/tsc.h>
|
||||
|
||||
static inline unsigned long random_get_entropy(void)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_X86_TSC) &&
|
||||
!cpu_feature_enabled(X86_FEATURE_TSC))
|
||||
return random_get_entropy_fallback();
|
||||
return rdtsc();
|
||||
}
|
||||
#define random_get_entropy random_get_entropy
|
||||
|
||||
/* Assume we use the PIT time source for the clock tick */
|
||||
#define CLOCK_TICK_RATE PIT_TICK_RATE
|
||||
|
||||
|
@ -22,13 +22,12 @@ extern void disable_TSC(void);
|
||||
|
||||
static inline cycles_t get_cycles(void)
|
||||
{
|
||||
#ifndef CONFIG_X86_TSC
|
||||
if (!boot_cpu_has(X86_FEATURE_TSC))
|
||||
if (!IS_ENABLED(CONFIG_X86_TSC) &&
|
||||
!cpu_feature_enabled(X86_FEATURE_TSC))
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
return rdtsc();
|
||||
}
|
||||
#define get_cycles get_cycles
|
||||
|
||||
extern struct system_counterval_t convert_art_to_tsc(u64 art);
|
||||
|
||||
|
@ -167,7 +167,7 @@ static __init int setup_apicpmtimer(char *s)
|
||||
{
|
||||
apic_calibrate_pmtmr = 1;
|
||||
notsc_setup(NULL);
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
__setup("apicpmtimer", setup_apicpmtimer);
|
||||
#endif
|
||||
|
@ -40,8 +40,10 @@ static void __init spectre_v2_select_mitigation(void);
|
||||
static void __init ssb_select_mitigation(void);
|
||||
static void __init l1tf_select_mitigation(void);
|
||||
static void __init mds_select_mitigation(void);
|
||||
static void __init mds_print_mitigation(void);
|
||||
static void __init md_clear_update_mitigation(void);
|
||||
static void __init md_clear_select_mitigation(void);
|
||||
static void __init taa_select_mitigation(void);
|
||||
static void __init mmio_select_mitigation(void);
|
||||
static void __init srbds_select_mitigation(void);
|
||||
|
||||
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
|
||||
@ -76,6 +78,10 @@ EXPORT_SYMBOL_GPL(mds_user_clear);
|
||||
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
|
||||
EXPORT_SYMBOL_GPL(mds_idle_clear);
|
||||
|
||||
/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
|
||||
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
|
||||
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
|
||||
|
||||
void __init check_bugs(void)
|
||||
{
|
||||
identify_boot_cpu();
|
||||
@ -108,16 +114,9 @@ void __init check_bugs(void)
|
||||
spectre_v2_select_mitigation();
|
||||
ssb_select_mitigation();
|
||||
l1tf_select_mitigation();
|
||||
mds_select_mitigation();
|
||||
taa_select_mitigation();
|
||||
md_clear_select_mitigation();
|
||||
srbds_select_mitigation();
|
||||
|
||||
/*
|
||||
* As MDS and TAA mitigations are inter-related, print MDS
|
||||
* mitigation until after TAA mitigation selection is done.
|
||||
*/
|
||||
mds_print_mitigation();
|
||||
|
||||
arch_smt_update();
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
@ -257,14 +256,6 @@ static void __init mds_select_mitigation(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void __init mds_print_mitigation(void)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
|
||||
return;
|
||||
|
||||
pr_info("%s\n", mds_strings[mds_mitigation]);
|
||||
}
|
||||
|
||||
static int __init mds_cmdline(char *str)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_MDS))
|
||||
@ -312,7 +303,7 @@ static void __init taa_select_mitigation(void)
|
||||
/* TSX previously disabled by tsx=off */
|
||||
if (!boot_cpu_has(X86_FEATURE_RTM)) {
|
||||
taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
if (cpu_mitigations_off()) {
|
||||
@ -326,7 +317,7 @@ static void __init taa_select_mitigation(void)
|
||||
*/
|
||||
if (taa_mitigation == TAA_MITIGATION_OFF &&
|
||||
mds_mitigation == MDS_MITIGATION_OFF)
|
||||
goto out;
|
||||
return;
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
|
||||
taa_mitigation = TAA_MITIGATION_VERW;
|
||||
@ -358,18 +349,6 @@ static void __init taa_select_mitigation(void)
|
||||
|
||||
if (taa_nosmt || cpu_mitigations_auto_nosmt())
|
||||
cpu_smt_disable(false);
|
||||
|
||||
/*
|
||||
* Update MDS mitigation, if necessary, as the mds_user_clear is
|
||||
* now enabled for TAA mitigation.
|
||||
*/
|
||||
if (mds_mitigation == MDS_MITIGATION_OFF &&
|
||||
boot_cpu_has_bug(X86_BUG_MDS)) {
|
||||
mds_mitigation = MDS_MITIGATION_FULL;
|
||||
mds_select_mitigation();
|
||||
}
|
||||
out:
|
||||
pr_info("%s\n", taa_strings[taa_mitigation]);
|
||||
}
|
||||
|
||||
static int __init tsx_async_abort_parse_cmdline(char *str)
|
||||
@ -393,6 +372,151 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
|
||||
}
|
||||
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "MMIO Stale Data: " fmt
|
||||
|
||||
enum mmio_mitigations {
|
||||
MMIO_MITIGATION_OFF,
|
||||
MMIO_MITIGATION_UCODE_NEEDED,
|
||||
MMIO_MITIGATION_VERW,
|
||||
};
|
||||
|
||||
/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
|
||||
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
|
||||
static bool mmio_nosmt __ro_after_init = false;
|
||||
|
||||
static const char * const mmio_strings[] = {
|
||||
[MMIO_MITIGATION_OFF] = "Vulnerable",
|
||||
[MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
|
||||
[MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
|
||||
};
|
||||
|
||||
static void __init mmio_select_mitigation(void)
|
||||
{
|
||||
u64 ia32_cap;
|
||||
|
||||
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
|
||||
cpu_mitigations_off()) {
|
||||
mmio_mitigation = MMIO_MITIGATION_OFF;
|
||||
return;
|
||||
}
|
||||
|
||||
if (mmio_mitigation == MMIO_MITIGATION_OFF)
|
||||
return;
|
||||
|
||||
ia32_cap = x86_read_arch_cap_msr();
|
||||
|
||||
/*
|
||||
* Enable CPU buffer clear mitigation for host and VMM, if also affected
|
||||
* by MDS or TAA. Otherwise, enable mitigation for VMM only.
|
||||
*/
|
||||
if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
|
||||
boot_cpu_has(X86_FEATURE_RTM)))
|
||||
static_branch_enable(&mds_user_clear);
|
||||
else
|
||||
static_branch_enable(&mmio_stale_data_clear);
|
||||
|
||||
/*
|
||||
* If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
|
||||
* be propagated to uncore buffers, clearing the Fill buffers on idle
|
||||
* is required irrespective of SMT state.
|
||||
*/
|
||||
if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
|
||||
static_branch_enable(&mds_idle_clear);
|
||||
|
||||
/*
|
||||
* Check if the system has the right microcode.
|
||||
*
|
||||
* CPU Fill buffer clear mitigation is enumerated by either an explicit
|
||||
* FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
|
||||
* affected systems.
|
||||
*/
|
||||
if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
|
||||
(boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
|
||||
boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
|
||||
!(ia32_cap & ARCH_CAP_MDS_NO)))
|
||||
mmio_mitigation = MMIO_MITIGATION_VERW;
|
||||
else
|
||||
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
|
||||
|
||||
if (mmio_nosmt || cpu_mitigations_auto_nosmt())
|
||||
cpu_smt_disable(false);
|
||||
}
|
||||
|
||||
static int __init mmio_stale_data_parse_cmdline(char *str)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
|
||||
return 0;
|
||||
|
||||
if (!str)
|
||||
return -EINVAL;
|
||||
|
||||
if (!strcmp(str, "off")) {
|
||||
mmio_mitigation = MMIO_MITIGATION_OFF;
|
||||
} else if (!strcmp(str, "full")) {
|
||||
mmio_mitigation = MMIO_MITIGATION_VERW;
|
||||
} else if (!strcmp(str, "full,nosmt")) {
|
||||
mmio_mitigation = MMIO_MITIGATION_VERW;
|
||||
mmio_nosmt = true;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "" fmt
|
||||
|
||||
static void __init md_clear_update_mitigation(void)
|
||||
{
|
||||
if (cpu_mitigations_off())
|
||||
return;
|
||||
|
||||
if (!static_key_enabled(&mds_user_clear))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
|
||||
* mitigation, if necessary.
|
||||
*/
|
||||
if (mds_mitigation == MDS_MITIGATION_OFF &&
|
||||
boot_cpu_has_bug(X86_BUG_MDS)) {
|
||||
mds_mitigation = MDS_MITIGATION_FULL;
|
||||
mds_select_mitigation();
|
||||
}
|
||||
if (taa_mitigation == TAA_MITIGATION_OFF &&
|
||||
boot_cpu_has_bug(X86_BUG_TAA)) {
|
||||
taa_mitigation = TAA_MITIGATION_VERW;
|
||||
taa_select_mitigation();
|
||||
}
|
||||
if (mmio_mitigation == MMIO_MITIGATION_OFF &&
|
||||
boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
|
||||
mmio_mitigation = MMIO_MITIGATION_VERW;
|
||||
mmio_select_mitigation();
|
||||
}
|
||||
out:
|
||||
if (boot_cpu_has_bug(X86_BUG_MDS))
|
||||
pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
|
||||
if (boot_cpu_has_bug(X86_BUG_TAA))
|
||||
pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
|
||||
if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
|
||||
pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
|
||||
}
|
||||
|
||||
static void __init md_clear_select_mitigation(void)
|
||||
{
|
||||
mds_select_mitigation();
|
||||
taa_select_mitigation();
|
||||
mmio_select_mitigation();
|
||||
|
||||
/*
|
||||
* As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
|
||||
* and print their mitigation after MDS, TAA and MMIO Stale Data
|
||||
* mitigation selection is done.
|
||||
*/
|
||||
md_clear_update_mitigation();
|
||||
}
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "SRBDS: " fmt
|
||||
|
||||
@ -454,11 +578,13 @@ static void __init srbds_select_mitigation(void)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Check to see if this is one of the MDS_NO systems supporting
|
||||
* TSX that are only exposed to SRBDS when TSX is enabled.
|
||||
* Check to see if this is one of the MDS_NO systems supporting TSX that
|
||||
* are only exposed to SRBDS when TSX is enabled or when CPU is affected
|
||||
* by Processor MMIO Stale Data vulnerability.
|
||||
*/
|
||||
ia32_cap = x86_read_arch_cap_msr();
|
||||
if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
|
||||
if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
|
||||
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
|
||||
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
|
||||
else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
|
||||
srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
|
||||
@ -1066,6 +1192,8 @@ static void update_indir_branch_cond(void)
|
||||
/* Update the static key controlling the MDS CPU buffer clear in idle */
|
||||
static void update_mds_branch_idle(void)
|
||||
{
|
||||
u64 ia32_cap = x86_read_arch_cap_msr();
|
||||
|
||||
/*
|
||||
* Enable the idle clearing if SMT is active on CPUs which are
|
||||
* affected only by MSBDS and not any other MDS variant.
|
||||
@ -1077,14 +1205,17 @@ static void update_mds_branch_idle(void)
|
||||
if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
|
||||
return;
|
||||
|
||||
if (sched_smt_active())
|
||||
if (sched_smt_active()) {
|
||||
static_branch_enable(&mds_idle_clear);
|
||||
else
|
||||
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
|
||||
(ia32_cap & ARCH_CAP_FBSDP_NO)) {
|
||||
static_branch_disable(&mds_idle_clear);
|
||||
}
|
||||
}
|
||||
|
||||
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
|
||||
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
|
||||
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
|
||||
|
||||
void arch_smt_update(void)
|
||||
{
|
||||
@ -1129,6 +1260,16 @@ void arch_smt_update(void)
|
||||
break;
|
||||
}
|
||||
|
||||
switch (mmio_mitigation) {
|
||||
case MMIO_MITIGATION_VERW:
|
||||
case MMIO_MITIGATION_UCODE_NEEDED:
|
||||
if (sched_smt_active())
|
||||
pr_warn_once(MMIO_MSG_SMT);
|
||||
break;
|
||||
case MMIO_MITIGATION_OFF:
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&spec_ctrl_mutex);
|
||||
}
|
||||
|
||||
@ -1680,6 +1821,20 @@ static ssize_t tsx_async_abort_show_state(char *buf)
|
||||
sched_smt_active() ? "vulnerable" : "disabled");
|
||||
}
|
||||
|
||||
static ssize_t mmio_stale_data_show_state(char *buf)
|
||||
{
|
||||
if (mmio_mitigation == MMIO_MITIGATION_OFF)
|
||||
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
|
||||
return sysfs_emit(buf, "%s; SMT Host state unknown\n",
|
||||
mmio_strings[mmio_mitigation]);
|
||||
}
|
||||
|
||||
return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
|
||||
sched_smt_active() ? "vulnerable" : "disabled");
|
||||
}
|
||||
|
||||
static char *stibp_state(void)
|
||||
{
|
||||
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
|
||||
@ -1777,6 +1932,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
|
||||
case X86_BUG_SRBDS:
|
||||
return srbds_show_state(buf);
|
||||
|
||||
case X86_BUG_MMIO_STALE_DATA:
|
||||
return mmio_stale_data_show_state(buf);
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -1828,4 +1986,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
|
||||
{
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
|
||||
}
|
||||
|
||||
ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
|
||||
}
|
||||
#endif
|
||||
|
@ -970,18 +970,42 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
|
||||
X86_FEATURE_ANY, issues)
|
||||
|
||||
#define SRBDS BIT(0)
|
||||
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
|
||||
#define MMIO BIT(1)
|
||||
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
|
||||
#define MMIO_SBDS BIT(2)
|
||||
|
||||
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
|
||||
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(BROADWELL_XEON_D,X86_STEPPINGS(0x3, 0x5), MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) |
|
||||
BIT(7) | BIT(0xB), MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0xC), SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0xD), SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0x8), SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0x8), SRBDS),
|
||||
VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS),
|
||||
VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPINGS(0x1, 0x1), MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS),
|
||||
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
|
||||
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
|
||||
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
|
||||
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_X, X86_STEPPING_ANY, MMIO),
|
||||
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO | MMIO_SBDS),
|
||||
{}
|
||||
};
|
||||
|
||||
@ -1002,6 +1026,13 @@ u64 x86_read_arch_cap_msr(void)
|
||||
return ia32_cap;
|
||||
}
|
||||
|
||||
static bool arch_cap_mmio_immune(u64 ia32_cap)
|
||||
{
|
||||
return (ia32_cap & ARCH_CAP_FBSDP_NO &&
|
||||
ia32_cap & ARCH_CAP_PSDP_NO &&
|
||||
ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
|
||||
}
|
||||
|
||||
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u64 ia32_cap = x86_read_arch_cap_msr();
|
||||
@ -1053,12 +1084,27 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
||||
/*
|
||||
* SRBDS affects CPUs which support RDRAND or RDSEED and are listed
|
||||
* in the vulnerability blacklist.
|
||||
*
|
||||
* Some of the implications and mitigation of Shared Buffers Data
|
||||
* Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
|
||||
* SRBDS.
|
||||
*/
|
||||
if ((cpu_has(c, X86_FEATURE_RDRAND) ||
|
||||
cpu_has(c, X86_FEATURE_RDSEED)) &&
|
||||
cpu_matches(cpu_vuln_blacklist, SRBDS))
|
||||
cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
|
||||
setup_force_cpu_bug(X86_BUG_SRBDS);
|
||||
|
||||
/*
|
||||
* Processor MMIO Stale Data bug enumeration
|
||||
*
|
||||
* Affected CPU list is generally enough to enumerate the vulnerability,
|
||||
* but for virtualization case check for ARCH_CAP MSR bits also, VMM may
|
||||
* not want the guest to enumerate the bug.
|
||||
*/
|
||||
if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
|
||||
!arch_cap_mmio_immune(ia32_cap))
|
||||
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
|
||||
|
||||
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
|
||||
return;
|
||||
|
||||
|
@ -71,7 +71,7 @@ static bool ring3mwait_disabled __read_mostly;
|
||||
static int __init ring3mwait_disable(char *__unused)
|
||||
{
|
||||
ring3mwait_disabled = true;
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
__setup("ring3mwait=disable", ring3mwait_disable);
|
||||
|
||||
|
@ -175,8 +175,7 @@ void set_task_blockstep(struct task_struct *task, bool on)
|
||||
*
|
||||
* NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
|
||||
* task is current or it can't be running, otherwise we can race
|
||||
* with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
|
||||
* PTRACE_KILL is not safe.
|
||||
* with __switch_to_xtra(). We rely on ptrace_freeze_traced().
|
||||
*/
|
||||
local_irq_disable();
|
||||
debugctl = get_debugctlmsr();
|
||||
|
@ -70,9 +70,6 @@ static int __init control_va_addr_alignment(char *str)
|
||||
if (*str == 0)
|
||||
return 1;
|
||||
|
||||
if (*str == '=')
|
||||
str++;
|
||||
|
||||
if (!strcmp(str, "32"))
|
||||
va_align.flags = ALIGN_VA_32;
|
||||
else if (!strcmp(str, "64"))
|
||||
@ -82,11 +79,11 @@ static int __init control_va_addr_alignment(char *str)
|
||||
else if (!strcmp(str, "on"))
|
||||
va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
|
||||
else
|
||||
return 0;
|
||||
pr_warn("invalid option value: 'align_va_addr=%s'\n", str);
|
||||
|
||||
return 1;
|
||||
}
|
||||
__setup("align_va_addr", control_va_addr_alignment);
|
||||
__setup("align_va_addr=", control_va_addr_alignment);
|
||||
|
||||
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
|
||||
unsigned long, prot, unsigned long, flags,
|
||||
|
@ -214,6 +214,9 @@ static const struct {
|
||||
#define L1D_CACHE_ORDER 4
|
||||
static void *vmx_l1d_flush_pages;
|
||||
|
||||
/* Control for disabling CPU Fill buffer clear */
|
||||
static bool __read_mostly vmx_fb_clear_ctrl_available;
|
||||
|
||||
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
|
||||
{
|
||||
struct page *page;
|
||||
@ -820,6 +823,8 @@ struct vcpu_vmx {
|
||||
*/
|
||||
u64 msr_ia32_feature_control;
|
||||
u64 msr_ia32_feature_control_valid_bits;
|
||||
u64 msr_ia32_mcu_opt_ctrl;
|
||||
bool disable_fb_clear;
|
||||
};
|
||||
|
||||
enum segment_cache_field {
|
||||
@@ -1628,6 +1633,60 @@ static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
: : "a" (&operand), "c" (ext) : "cc", "memory");
}

static void vmx_setup_fb_clear_ctrl(void)
{
u64 msr;

if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
!boot_cpu_has_bug(X86_BUG_MDS) &&
!boot_cpu_has_bug(X86_BUG_TAA)) {
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
if (msr & ARCH_CAP_FB_CLEAR_CTRL)
vmx_fb_clear_ctrl_available = true;
}
}

static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
{
u64 msr;

if (!vmx->disable_fb_clear)
return;

rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
msr |= FB_CLEAR_DIS;
wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
/* Cache the MSR value to avoid reading it later */
vmx->msr_ia32_mcu_opt_ctrl = msr;
}

static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
{
if (!vmx->disable_fb_clear)
return;

vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
}

static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;

/*
 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
 * at VMEntry. Skip the MSR read/write when a guest has no use case to
 * execute VERW.
 */
if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
(vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
(vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
(vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
(vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
vmx->disable_fb_clear = false;
}

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
int i;

@@ -3703,6 +3762,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
ret = kvm_set_msr_common(vcpu, msr_info);
}

/* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
vmx_update_fb_clear_dis(vcpu, vmx);

return ret;
}

@@ -6008,6 +6071,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
update_exception_bitmap(vcpu);

vpid_sync_context(vmx->vpid);

vmx_update_fb_clear_dis(vcpu, vmx);
}

/*

@@ -9779,6 +9844,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&mds_user_clear))
mds_clear_cpu_buffers();
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();

vmx_disable_fb_clear(vmx);

asm(
/* Store host registers */

@@ -9897,6 +9967,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);

vmx_enable_fb_clear(vmx);

/*
 * We do not use IBRS in the kernel. If this vCPU has used the
 * SPEC_CTRL MSR it may have left it on; save the value and

@@ -12921,8 +12993,11 @@ static int __init vmx_init(void)
}
}

vmx_setup_fb_clear_ctrl();

for_each_possible_cpu(cpu) {
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));

INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}
@@ -1127,6 +1127,10 @@ u64 kvm_get_arch_capabilities(void)

/* KVM does not emulate MSR_IA32_TSX_CTRL. */
data &= ~ARCH_CAP_TSX_CTRL_MSR;

/* Guests don't need to know "Fill buffer clear control" exists */
data &= ~ARCH_CAP_FB_CLEAR_CTRL;

return data;
}

@@ -43,8 +43,8 @@ static void delay_loop(unsigned long loops)
" jnz 2b \n"
"3: dec %0 \n"

: /* we don't need output */
:"a" (loops)
: "+a" (loops)
:
);
}
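The delay_loop() change just above swaps the plain input constraint "a" (loops) for the read-write constraint "+a" (loops), because the asm body decrements the counter in place and an input-only operand does not tell the compiler about that. A minimal stand-alone sketch of the same constraint usage (hypothetical example for illustration only, not code from this tree):

/* Illustration of a "+" (read-write) operand constraint. */
static unsigned long spin_down(unsigned long loops)
{
    asm volatile(
        "1: dec %0 \n"
        "   jnz 1b \n"
        : "+a" (loops)  /* read and written, kept in the A register */
        :               /* no separate input operands */
        : "cc");
    return loops;       /* always 0 here; the point is the constraint */
}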
@@ -74,7 +74,7 @@ int pat_debug_enable;
static int __init pat_debug_setup(char *str)
{
pat_debug_enable = 1;
return 0;
return 1;
}
__setup("debugpat", pat_debug_setup);

@@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
{
long res;
void *stub_addr;

BUILD_BUG_ON(sizeof(*desc) % sizeof(long));

res = syscall_stub_data(mm_idp, (unsigned long *)desc,
(sizeof(*desc) + sizeof(long) - 1) &
~(sizeof(long) - 1),
sizeof(*desc) / sizeof(long),
addr, &stub_addr);
if (!res) {
unsigned long args[] = { func,

@@ -30,10 +30,6 @@

extern unsigned long ccount_freq;

typedef unsigned long long cycles_t;

#define get_cycles() (0)

void local_timer_setup(unsigned cpu);

/*

@@ -69,4 +65,6 @@ static inline void set_linux_timer (unsigned long ccompare)
WSR_CCOMPARE(LINUX_TIMER, ccompare);
}

#include <asm-generic/timex.h>

#endif /* _XTENSA_TIMEX_H */

@@ -35,12 +35,12 @@

void user_enable_single_step(struct task_struct *child)
{
child->ptrace |= PT_SINGLESTEP;
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *child)
{
child->ptrace &= ~PT_SINGLESTEP;
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*

@@ -459,7 +459,7 @@ static void do_signal(struct pt_regs *regs)
/* Set up the stack frame */
ret = setup_frame(&ksig, sigmask_to_save(), regs);
signal_setup_done(ret, &ksig, 0);
if (current->ptrace & PT_SINGLESTEP)
if (test_thread_flag(TIF_SINGLESTEP))
task_pt_regs(current)->icountlevel = 1;

return;

@@ -485,7 +485,7 @@ static void do_signal(struct pt_regs *regs)
/* If there's no signal to deliver, we just restore the saved mask. */
restore_saved_sigmask();

if (current->ptrace & PT_SINGLESTEP)
if (test_thread_flag(TIF_SINGLESTEP))
task_pt_regs(current)->icountlevel = 1;
return;
}

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "blacklist.h"

const char __initdata *const blacklist_hashes[] = {
const char __initconst *const blacklist_hashes[] = {
#include CONFIG_SYSTEM_BLACKLIST_HASH_LIST
, NULL
};
crypto/drbg.c

@@ -219,6 +219,57 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
}
}

/*
* FIPS 140-2 continuous self test for the noise source
* The test is performed on the noise source input data. Thus, the function
* implicitly knows the size of the buffer to be equal to the security
* strength.
*
* Note, this function disregards the nonce trailing the entropy data during
* initial seeding.
*
* drbg->drbg_mutex must have been taken.
*
* @drbg DRBG handle
* @entropy buffer of seed data to be checked
*
* return:
* 0 on success
* -EAGAIN on when the CTRNG is not yet primed
* < 0 on error
*/
static int drbg_fips_continuous_test(struct drbg_state *drbg,
const unsigned char *entropy)
{
unsigned short entropylen = drbg_sec_strength(drbg->core->flags);
int ret = 0;

if (!IS_ENABLED(CONFIG_CRYPTO_FIPS))
return 0;

/* skip test if we test the overall system */
if (list_empty(&drbg->test_data.list))
return 0;
/* only perform test in FIPS mode */
if (!fips_enabled)
return 0;

if (!drbg->fips_primed) {
/* Priming of FIPS test */
memcpy(drbg->prev, entropy, entropylen);
drbg->fips_primed = true;
/* priming: another round is needed */
return -EAGAIN;
}
ret = memcmp(drbg->prev, entropy, entropylen);
if (!ret)
panic("DRBG continuous self test failed\n");
memcpy(drbg->prev, entropy, entropylen);

/* the test shall pass when the two values are not equal */
return 0;
}

/*
* Convert an integer into a byte representation of this integer.
* The byte representation is big-endian

@@ -986,55 +1037,79 @@ static const struct drbg_state_ops drbg_hash_ops = {
******************************************************************/

static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
int reseed)
int reseed, enum drbg_seed_state new_seed_state)
{
int ret = drbg->d_ops->update(drbg, seed, reseed);

if (ret)
return ret;

drbg->seeded = true;
drbg->seeded = new_seed_state;
/* 10.1.1.2 / 10.1.1.3 step 5 */
drbg->reseed_ctr = 1;

switch (drbg->seeded) {
case DRBG_SEED_STATE_UNSEEDED:
/* Impossible, but handle it to silence compiler warnings. */
case DRBG_SEED_STATE_PARTIAL:
/*
* Require frequent reseeds until the seed source is
* fully initialized.
*/
drbg->reseed_threshold = 50;
break;

case DRBG_SEED_STATE_FULL:
/*
* Seed source has become fully initialized, frequent
* reseeds no longer required.
*/
drbg->reseed_threshold = drbg_max_requests(drbg);
break;
}

return ret;
}

static void drbg_async_seed(struct work_struct *work)
static inline int drbg_get_random_bytes(struct drbg_state *drbg,
unsigned char *entropy,
unsigned int entropylen)
{
int ret;

do {
get_random_bytes(entropy, entropylen);
ret = drbg_fips_continuous_test(drbg, entropy);
if (ret && ret != -EAGAIN)
return ret;
} while (ret);

return 0;
}

static int drbg_seed_from_random(struct drbg_state *drbg)
{
struct drbg_string data;
LIST_HEAD(seedlist);
struct drbg_state *drbg = container_of(work, struct drbg_state,
seed_work);
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
unsigned char entropy[32];
int ret;

BUG_ON(!entropylen);
BUG_ON(entropylen > sizeof(entropy));
get_random_bytes(entropy, entropylen);

drbg_string_fill(&data, entropy, entropylen);
list_add_tail(&data.list, &seedlist);

mutex_lock(&drbg->drbg_mutex);
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;

/* If nonblocking pool is initialized, deactivate Jitter RNG */
crypto_free_rng(drbg->jent);
drbg->jent = NULL;

/* Set seeded to false so that if __drbg_seed fails the
* next generate call will trigger a reseed.
*/
drbg->seeded = false;

__drbg_seed(drbg, &seedlist, true);

if (drbg->seeded)
drbg->reseed_threshold = drbg_max_requests(drbg);

mutex_unlock(&drbg->drbg_mutex);
ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);

out:
memzero_explicit(entropy, entropylen);
return ret;
}

/*
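The hunks above replace the old boolean drbg->seeded with the three-valued seeding state used by __drbg_seed() and drbg_seed_from_random(). For orientation, a sketch of the enum implied by those names (the real declaration lives in the DRBG header, presumably include/crypto/drbg.h, and is not part of this diff):

enum drbg_seed_state {
    DRBG_SEED_STATE_UNSEEDED,   /* not seeded, or reseed threshold exceeded */
    DRBG_SEED_STATE_PARTIAL,    /* seeded while the base RNG was not yet fully initialized */
    DRBG_SEED_STATE_FULL,       /* seeded from a fully initialized entropy source */
};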
@@ -1056,6 +1131,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
struct drbg_string data1;
LIST_HEAD(seedlist);
enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;

/* 9.1 / 9.2 / 9.3.1 step 3 */
if (pers && pers->len > (drbg_max_addtl(drbg))) {

@@ -1083,7 +1159,12 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
BUG_ON((entropylen * 2) > sizeof(entropy));

/* Get seed from in-kernel /dev/urandom */
get_random_bytes(entropy, entropylen);
if (!rng_is_initialized())
new_seed_state = DRBG_SEED_STATE_PARTIAL;

ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;

if (!drbg->jent) {
drbg_string_fill(&data1, entropy, entropylen);

@@ -1096,7 +1177,23 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
entropylen);
if (ret) {
pr_devel("DRBG: jent failed with %d\n", ret);
return ret;

/*
* Do not treat the transient failure of the
* Jitter RNG as an error that needs to be
* reported. The combined number of the
* maximum reseed threshold times the maximum
* number of Jitter RNG transient errors is
* less than the reseed threshold required by
* SP800-90A allowing us to treat the
* transient errors as such.
*
* However, we mandate that at least the first
* seeding operation must succeed with the
* Jitter RNG.
*/
if (!reseed || ret != -EAGAIN)
goto out;
}

drbg_string_fill(&data1, entropy, entropylen * 2);

@@ -1121,8 +1218,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
memset(drbg->C, 0, drbg_statelen(drbg));
}

ret = __drbg_seed(drbg, &seedlist, reseed);
ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);

out:
memzero_explicit(entropy, entropylen * 2);

return ret;

@@ -1144,6 +1242,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
drbg->reseed_ctr = 0;
drbg->d_ops = NULL;
drbg->core = NULL;
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
kzfree(drbg->prev);
drbg->prev = NULL;
drbg->fips_primed = false;
}
}

/*

@@ -1213,6 +1316,14 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}

if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
GFP_KERNEL);
if (!drbg->prev)
goto fini;
drbg->fips_primed = false;
}

return 0;

fini:

@@ -1285,19 +1396,25 @@ static int drbg_generate(struct drbg_state *drbg,
* here. The spec is a bit convoluted here, we make it simpler.
*/
if (drbg->reseed_threshold < drbg->reseed_ctr)
drbg->seeded = false;
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;

if (drbg->pr || !drbg->seeded) {
if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
drbg->pr ? "true" : "false",
drbg->seeded ? "seeded" : "unseeded");
(drbg->seeded == DRBG_SEED_STATE_FULL ?
"seeded" : "unseeded"));
/* 9.3.1 steps 7.1 through 7.3 */
len = drbg_seed(drbg, addtl, true);
if (len)
goto err;
/* 9.3.1 step 7.4 */
addtl = NULL;
} else if (rng_is_initialized() &&
drbg->seeded == DRBG_SEED_STATE_PARTIAL) {
len = drbg_seed_from_random(drbg);
if (len)
goto err;
}

if (addtl && 0 < addtl->len)

@@ -1390,51 +1507,15 @@ static int drbg_generate_long(struct drbg_state *drbg,
return 0;
}

static void drbg_schedule_async_seed(struct random_ready_callback *rdy)
{
struct drbg_state *drbg = container_of(rdy, struct drbg_state,
random_ready);

schedule_work(&drbg->seed_work);
}

static int drbg_prepare_hrng(struct drbg_state *drbg)
{
int err;

/* We do not need an HRNG in test mode. */
if (list_empty(&drbg->test_data.list))
return 0;

INIT_WORK(&drbg->seed_work, drbg_async_seed);

drbg->random_ready.owner = THIS_MODULE;
drbg->random_ready.func = drbg_schedule_async_seed;

err = add_random_ready_callback(&drbg->random_ready);

switch (err) {
case 0:
break;

case -EALREADY:
err = 0;
/* fall through */

default:
drbg->random_ready.func = NULL;
return err;
}

drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);

/*
* Require frequent reseeds until the seed source is fully
* initialized.
*/
drbg->reseed_threshold = 50;

return err;
return 0;
}

/*

@@ -1477,7 +1558,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
if (!drbg->core) {
drbg->core = &drbg_cores[coreref];
drbg->pr = pr;
drbg->seeded = false;
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
drbg->reseed_threshold = drbg_max_requests(drbg);

ret = drbg_alloc_state(drbg);

@@ -1528,12 +1609,9 @@ free_everything:
*/
static int drbg_uninstantiate(struct drbg_state *drbg)
{
if (drbg->random_ready.func) {
del_random_ready_callback(&drbg->random_ready);
cancel_work_sync(&drbg->seed_work);
if (!IS_ERR_OR_NULL(drbg->jent))
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
}

if (drbg->d_ops)
drbg->d_ops->crypto_fini(drbg);
crypto/md4.c

@@ -64,23 +64,6 @@ static inline u32 H(u32 x, u32 y, u32 z)
#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))

/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
while (words--) {
__le32_to_cpus(buf);
buf++;
}
}

static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
while (words--) {
__cpu_to_le32s(buf);
buf++;
}
}

static void md4_transform(u32 *hash, u32 const *in)
{
u32 a, b, c, d;

crypto/md5.c

@@ -32,23 +32,6 @@ const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
};
EXPORT_SYMBOL_GPL(md5_zero_message_hash);

/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
while (words--) {
__le32_to_cpus(buf);
buf++;
}
}

static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
while (words--) {
__cpu_to_le32s(buf);
buf++;
}
}

#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
@@ -6180,7 +6180,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi,
int n_ports)
{
const struct ata_port_info *pi;
const struct ata_port_info *pi = &ata_dummy_port_info;
struct ata_host *host;
int i, j;

@@ -6188,7 +6188,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
if (!host)
return NULL;

for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
for (i = 0, j = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];

if (ppi[j])

@@ -196,7 +196,7 @@ static struct {
{ XFER_PIO_0, "XFER_PIO_0" },
{ XFER_PIO_SLOW, "XFER_PIO_SLOW" }
};
ata_bitfield_name_match(xfer,ata_xfer_names)
ata_bitfield_name_search(xfer, ata_xfer_names)

/*
* ATA Port attributes

@@ -888,12 +888,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
int i;
res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
if (!res_dma) {
put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}

@@ -903,6 +905,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
irq = i;
irq_handler = octeon_cf_interrupt;
}
put_device(&dma_dev->dev);
}
of_node_put(dma_node);
}

@@ -662,6 +662,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev,
return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
}

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);

@@ -671,6 +677,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,

@@ -682,6 +689,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
&dev_attr_mmio_stale_data.attr,
NULL
};
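Usage note: the weak cpu_show_mmio_stale_data() handler and the mmio_stale_data attribute added above join the generic CPU vulnerabilities attribute group, so on a typical build the reported state can be read from userspace via sysfs, e.g. /sys/devices/system/cpu/vulnerabilities/mmio_stale_data (path assumed from the usual placement of this attribute group); the fallback above prints "Not affected" unless an architecture overrides it.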
@@ -348,6 +348,7 @@ static int register_node(struct node *node, int num)
*/
void unregister_node(struct node *node)
{
compaction_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */

device_unregister(&node->dev);

@@ -1275,7 +1275,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
sock_shutdown(nbd);
nbd_clear_sock(nbd);
__invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF,

@@ -1382,15 +1382,20 @@ static struct nbd_config *nbd_alloc_config(void)
{
struct nbd_config *config;

if (!try_module_get(THIS_MODULE))
return ERR_PTR(-ENODEV);

config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
if (!config)
return NULL;
if (!config) {
module_put(THIS_MODULE);
return ERR_PTR(-ENOMEM);
}

atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
config->blksize = NBD_DEF_BLKSIZE;
atomic_set(&config->live_connections, 0);
try_module_get(THIS_MODULE);
return config;
}

@@ -1417,12 +1422,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&nbd->config_lock);
goto out;
}
config = nbd->config = nbd_alloc_config();
if (!config) {
ret = -ENOMEM;
config = nbd_alloc_config();
if (IS_ERR(config)) {
ret = PTR_ERR(config);
mutex_unlock(&nbd->config_lock);
goto out;
}
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);

@@ -1803,13 +1809,14 @@ again:
nbd_put(nbd);
return -EINVAL;
}
config = nbd->config = nbd_alloc_config();
if (!nbd->config) {
config = nbd_alloc_config();
if (IS_ERR(config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
printk(KERN_ERR "nbd: couldn't allocate config\n");
return -ENOMEM;
return PTR_ERR(config);
}
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_BOUND, &config->runtime_flags);
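The nbd_alloc_config() hunks above move the allocator from returning NULL on failure to the kernel's ERR_PTR() convention, so callers test IS_ERR() and propagate PTR_ERR() instead of hard-coding -ENOMEM. A minimal sketch of that pattern with hypothetical names (foo_* is illustrative, not nbd code):

#include <linux/err.h>
#include <linux/slab.h>

struct foo_config { int dummy; };

static struct foo_config *foo_alloc_config(void)
{
    struct foo_config *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);

    if (!cfg)
        return ERR_PTR(-ENOMEM);    /* encode the errno in the pointer */
    return cfg;
}

static int foo_open(void)
{
    struct foo_config *cfg = foo_alloc_config();

    if (IS_ERR(cfg))
        return PTR_ERR(cfg);        /* recover the errno for the caller */
    kfree(cfg);
    return 0;
}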
@@ -2319,6 +2326,12 @@ static void __exit nbd_cleanup(void)
struct nbd_device *nbd;
LIST_HEAD(del_list);

/*
* Unregister netlink interface prior to waiting
* for the completion of netlink commands.
*/
genl_unregister_family(&nbd_genl_family);

nbd_dbg_close();

mutex_lock(&nbd_index_mutex);

@@ -2328,13 +2341,15 @@ static void __exit nbd_cleanup(void)
while (!list_empty(&del_list)) {
nbd = list_first_entry(&del_list, struct nbd_device, list);
list_del_init(&nbd->list);
if (refcount_read(&nbd->config_refs))
printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
refcount_read(&nbd->config_refs));
if (refcount_read(&nbd->refs) != 1)
printk(KERN_ERR "nbd: possibly leaking a device\n");
nbd_put(nbd);
}

idr_destroy(&nbd_index_idr);
genl_unregister_family(&nbd_genl_family);
unregister_blkdev(NBD_MAJOR, "nbd");
}
@@ -633,8 +633,6 @@ config MSM_RDBG
for a debugger running on a host PC to communicate with a remote
stub running on peripheral subsystems such as the ADSP, MODEM etc.

endmenu

config OKL4_PIPE
bool "OKL4 Pipe Driver"
depends on OKL4_GUEST

@@ -681,3 +679,42 @@ config VSERVICES_VTTY_COUNT
help
The maximum number of Virtual Services serial devices to support.
This limit applies to both the client and server.

config RANDOM_TRUST_CPU
bool "Initialize RNG using CPU RNG instructions"
default y
depends on ARCH_RANDOM
help
Initialize the RNG using random numbers supplied by the CPU's
RNG instructions (e.g. RDRAND), if supported and available. These
random numbers are never used directly, but are rather hashed into
the main input pool, and this happens regardless of whether or not
this option is enabled. Instead, this option controls whether
they are credited and hence can initialize the RNG. Additionally,
other sources of randomness are always used, regardless of this
setting. Enabling this implies trusting that the CPU can supply high
quality and non-backdoored random numbers.

Say Y here unless you have reason to mistrust your CPU or believe
its RNG facilities may be faulty. This may also be configured at
boot time with "random.trust_cpu=on/off".

config RANDOM_TRUST_BOOTLOADER
bool "Initialize RNG using bootloader-supplied seed"
default y
help
Initialize the RNG using a seed supplied by the bootloader or boot
environment (e.g. EFI or a bootloader-generated device tree). This
seed is not used directly, but is rather hashed into the main input
pool, and this happens regardless of whether or not this option is
enabled. Instead, this option controls whether the seed is credited
and hence can initialize the RNG. Additionally, other sources of
randomness are always used, regardless of this setting. Enabling
this implies trusting that the bootloader can supply high quality and
non-backdoored seeds.

Say Y here unless you have reason to mistrust your bootloader or
believe its RNG facilities may be faulty. This may also be configured
at boot time with "random.trust_bootloader=on/off".

endmenu
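Usage note for the two new options above: besides the build-time defaults, the help text points at the runtime switches, so a kernel command line such as random.trust_cpu=off random.trust_bootloader=off (an illustrative combination) stops both seed sources from being credited without rebuilding the kernel.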
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>

@@ -816,6 +816,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
break;

case SSIF_GETTING_EVENTS:
if (!msg) {
/* Should never happen, but just in case. */
dev_warn(&ssif_info->client->dev,
"No message set while getting events\n");
ipmi_ssif_unlock_cond(ssif_info, flags);
break;
}

if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
/* Error getting event, probably done. */
msg->done(msg);

@@ -839,6 +847,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
break;

case SSIF_GETTING_MESSAGES:
if (!msg) {
/* Should never happen, but just in case. */
dev_warn(&ssif_info->client->dev,
"No message set while getting messages\n");
ipmi_ssif_unlock_cond(ssif_info, flags);
break;
}

if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
/* Error getting event, probably done. */
msg->done(msg);

@@ -861,6 +877,13 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
deliver_recv_msg(ssif_info, msg);
}
break;

default:
/* Should never happen, but just in case. */
dev_warn(&ssif_info->client->dev,
"Invalid state in message done handling: %d\n",
ssif_info->ssif_state);
ipmi_ssif_unlock_cond(ssif_info, flags);
}

flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
File diff suppressed because it is too large
@@ -247,7 +247,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
}

rps->irq = irq_of_parse_and_map(np, 0);
if (rps->irq < 0) {
if (!rps->irq) {
ret = -EINVAL;
goto err_iomap;
}

@@ -227,6 +227,11 @@ static int __init sp804_of_init(struct device_node *np)
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);

if (initialized) {
pr_debug("%pOF: skipping further SP804 timer device\n", np);
return 0;
}

base = of_iomap(np, 0);
if (!base)
return -ENXIO;

@@ -235,11 +240,6 @@ static int __init sp804_of_init(struct device_node *np)
writel(0, base + TIMER_CTRL);
writel(0, base + TIMER_2_BASE + TIMER_CTRL);

if (initialized || !of_device_is_available(np)) {
ret = -EINVAL;
goto err;
}

clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
clk1 = NULL;
@@ -1299,19 +1299,14 @@ int extcon_dev_register(struct extcon_dev *edev)
edev->dev.type = &edev->extcon_dev_type;
}

ret = device_register(&edev->dev);
if (ret) {
put_device(&edev->dev);
goto err_dev;
}

spin_lock_init(&edev->lock);
edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
sizeof(*edev->nh), GFP_KERNEL);
if (edev->max_supported) {
edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
GFP_KERNEL);
if (!edev->nh) {
ret = -ENOMEM;
device_unregister(&edev->dev);
goto err_dev;
goto err_alloc_nh;
}
}

edev->bnh = devm_kzalloc(&edev->dev,

@@ -1329,6 +1324,12 @@ int extcon_dev_register(struct extcon_dev *edev)
dev_set_drvdata(&edev->dev, edev);
edev->state = 0;

ret = device_register(&edev->dev);
if (ret) {
put_device(&edev->dev);
goto err_dev;
}

mutex_lock(&extcon_dev_list_lock);
list_add(&edev->entry, &extcon_dev_list);
mutex_unlock(&extcon_dev_list_lock);

@@ -1336,6 +1337,9 @@ int extcon_dev_register(struct extcon_dev *edev)
return 0;

err_dev:
if (edev->max_supported)
kfree(edev->nh);
err_alloc_nh:
if (edev->max_supported)
kfree(edev->extcon_dev_type.groups);
err_alloc_groups:

@@ -1396,6 +1400,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
if (edev->max_supported) {
kfree(edev->extcon_dev_type.groups);
kfree(edev->cables);
kfree(edev->nh);
}

put_device(&edev->dev);
@@ -602,7 +602,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
"%d-%d", dh->type, entry->instance);

if (*ret) {
kfree(entry);
kobject_put(&entry->kobj);
return;
}

@@ -84,7 +84,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
int ret;

if (cs->in.num_chunks == 0)
return 0;
return -EINVAL;

chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)

@@ -1608,19 +1608,7 @@ static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)

static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
u8 i;
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

for (i = 0; i < table->count; i++) {
if (table->entries[i].clk >= 0) /* XXX */
break;
}

if (i >= table->count)
i = table->count - 1;

return i;
return 0;
}

static void kv_update_acp_boot_level(struct amdgpu_device *adev)
@@ -7238,17 +7238,15 @@ static int si_parse_power_table(struct amdgpu_device *adev)
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
for (i = 0; i < state_array->ucNumEntries; i++) {
for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(adev->pm.dpm.ps);
if (ps == NULL)
return -ENOMEM;
}
adev->pm.dpm.ps[i].ps_priv = ps;
si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
non_clock_info,

@@ -7270,8 +7268,8 @@ static int si_parse_power_table(struct amdgpu_device *adev)
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
adev->pm.dpm.num_ps++;
}
adev->pm.dpm.num_ps = state_array->ucNumEntries;

/* fill in the vce power states */
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {

@@ -1279,8 +1279,19 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct analogix_dp_device *dp = to_dp(aux);
int ret;

return analogix_dp_transfer(dp, msg);
pm_runtime_get_sync(dp->dev);

ret = analogix_dp_detect_hpd(dp);
if (ret)
goto out;

ret = analogix_dp_transfer(dp, msg);
out:
pm_runtime_put(dp->dev);

return ret;
}

int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,

@@ -543,14 +543,15 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,

struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc = NULL;
struct drm_crtc *crtc;

list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

if (gma_crtc->pipe == pipe)
break;
}
return crtc;
}
return NULL;
}

int gma_connector_clones(struct drm_device *dev, int type_mask)

@@ -72,7 +72,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}

@@ -91,7 +91,7 @@ static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
u32 tmp = readl(cec->regs + offset) & ~mask;

tmp |= val & mask;
writel(val, cec->regs + offset);
writel(tmp, cec->regs + offset);
}

void mtk_cec_set_hpd_event(struct device *dev,
Some files were not shown because too many files have changed in this diff.