* 'linux-4.14.y' of https://github.com/openela/kernel-lts: (186 commits)
  LTS: Update to 4.14.344
  binder: signal epoll threads of self-work
  ANDROID: binder: Add thread->process_todo flag.
  scsi: bnx2fc: Fix skb double free in bnx2fc_rcv()
  scsi: bnx2fc: Remove set but not used variable 'oxid'
  net: check dev->gso_max_size in gso_features_check()
  driver: staging: count ashmem_range into SLAB_RECLAIMBLE
  net: warn if gso_type isn't set for a GSO SKB
  staging: android: ashmem: Remove use of unlikely()
  ALSA: hda/realtek: Enable headset on Lenovo M90 Gen5
  ALSA: hda/realtek: Enable headset onLenovo M70/M90
  ALSA: hda/realtek: Add quirk for Lenovo TianYi510Pro-14IOB
  ALSA: hda/realtek - ALC897 headset MIC no sound
  ALSA: hda/realtek - Add headset Mic support for Lenovo ALC897 platform
  ALSA: hda/realtek: Fix the mic type detection issue for ASUS G551JW
  ALSA: hda/realtek - The front Mic on a HP machine doesn't work
  ALSA: hda/realtek - Enable the headset of Acer N50-600 with ALC662
  ALSA: hda/realtek - Enable headset mic of Acer X2660G with ALC662
  ALSA: hda/realtek - Add Headset Mic supported for HP cPC
  ALSA: hda/realtek - More constifications
  ...

Change-Id: I3d093c0e457ab7e7e7b98b46eb44e82b6f4636f9
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
This commit is contained in:
Richard Raya 2024-05-08 19:24:35 -03:00
commit 59c72f3544
150 changed files with 2711 additions and 1063 deletions

View File

@@ -2,4 +2,4 @@ upstream_repo: git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
upstream_base: 4.19.304
base: 4.14.336
upstream_version: 4.19.311
version: 4.14.343
version: 4.14.344

555
.elts/meta/4.14.344.yaml Normal file
View File

@@ -0,0 +1,555 @@
b6367d59022c3d29f26c2674b1d27b9a57fd7611:
title: 'ARM: 9303/1: kprobes: avoid missing-declaration warnings'
mainline: 1b9c3ddcec6a55e15d3e38e7405e2d078db02020
6ca1b2d3199e6cc70dbe78e0aac0c645864f254e:
title: 'IB/hfi1: Fix sdma.h tx->num_descs off-by-one errors'
mainline: fd8958efe8779d3db19c9124fce593ce681ac709
3897b04a0125d9a98b69d9e640894e989e0111f0:
title: 'ALSA: jack: Fix mutex call in snd_jack_report()'
mainline: 89dbb335cb6a627a4067bc42caa09c8bc3326d40
44918a75916b6ac426f3eb490ec190f7267ebe58:
title: 'pinctrl: amd: Detect internal GPIO0 debounce handling'
mainline: 968ab9261627fa305307e3935ca1a32fcddd36cb
baf47aad0cfd39d3fa596efcaee9515a4faa5333:
title: 'btrfs: fix extent buffer leak after tree mod log failure at split_node()'
mainline: ede600e497b1461d06d22a7d17703d9096868bc3
49d8396c56ccab57ecb82eac5f03d2ddbc6c5d5f:
title: 'ext4: fix to check return value of freeze_bdev() in ext4_shutdown()'
mainline: c4d13222afd8a64bf11bc7ec68645496ee8b54b9
e10468ba237d9e21c82e7ba195c65ade5c98f1f6:
title: 'iio: addac: stx104: Fix race condition for stx104_write_raw()'
mainline: 9740827468cea80c42db29e7171a50e99acf7328
0610bb030e1533960373b258cc1da102a9797ef4:
title: 'block: fix signed int overflow in Amiga partition support'
mainline: fc3d092c6bb48d5865fec15ed5b333c12f36288c
a450641d12ecfdc76146e9adbf9afa97d4066f13:
title: 'media: v4l2-fwnode: fix v4l2_fwnode_parse_link handling'
mainline: 453b0c8304dcbc6eed2836de8fee90bf5bcc7006
9e997f74ce0f2fcd1544dfb02dba9e34302e316c:
title: 'selftests/ftrace: Add new test case which checks non unique symbol'
mainline: 03b80ff8023adae6780e491f66e932df8165e3a0
c86b5b015e84106b7f27125afa8cab73b6049404:
title: 'iio: exynos-adc: request second interupt only when touchscreen mode is used'
mainline: 865b080e3229102f160889328ce2e8e97aa65ea0
1fc95525bb3120beb3ecb973c63d15451af155d1:
title: 'tty: Fix uninit-value access in ppp_sync_receive()'
mainline: 719639853d88071dfdfd8d9971eca9c283ff314c
99be7e54736c8c10ef7eb932663e6934e7211d40:
title: 'tipc: Fix kernel-infoleak due to uninitialized TLV value'
mainline: fb317eb23b5ee4c37b0656a9a52a3db58d9dd072
4425848d99f2d3b435b82908cb5d97cbe2a8bea0:
title: 'xen/events: fix delayed eoi list handling'
mainline: 47d970204054f859f35a2237baa75c2d84fcf436
44bca7e58d3781ecfcc50a057dc543cf1a4f5aa2:
title: 'ptp: annotate data-race around q->head and q->tail'
mainline: 73bde5a3294853947252cd9092a3517c7cb0cd2d
3dc6016771b5b1ba4a6b2e5b30bde6a60f58da0a:
title: 'ppp: limit MRU to 64K'
mainline: c0a2a1b0d631fc460d830f52d06211838874d655
5637b9415bdd7751c16e7c4bf6f5aac4153b9ad5:
title: 'macvlan: Don''t propagate promisc change to lower dev in passthru'
mainline: 7e1caeace0418381f36b3aa8403dfd82fc57fc53
cb012706e5b68819a0621bc309bef26361bc0242:
title: 'cifs: spnego: add '';'' in HOST_KEY_LEN'
mainline: ff31ba19d732efb9aca3633935d71085e68d5076
8905c5cb8ab75e1db511953a5ff6bcaabba923ab:
title: 'MIPS: KVM: Fix a build warning about variable set but not used'
mainline: 83767a67e7b6a0291cde5681ec7e3708f3f8f877
af8bc7a89e19db227d1dc677274e0551e557be36:
title: 'smb3: fix touch -h of symlink'
mainline: 475efd9808a3094944a56240b2711349e433fb66
d4ad1c73b6822d4420f5040e390a469525012265:
title: 'fbdev: stifb: Make the STI next font pointer a 32-bit signed offset'
mainline: 8a32aa17c1cd48df1ddaa78e45abcb8c7a2220d6
deff8222bd2f74ee0f6b68d182eda890faaf40ba:
title: 'IB/isert: Fix unaligned immediate-data handling'
mainline: 0b089c1ef7047652b13b4cdfdb1e0e7dbdb8c9ab
f22913e1425acf9ecf5c5aa3754d1db3d8606e59:
title: 'arm64: dts: mediatek: mt8173-evb: Fix regulator-fixed node names'
mainline: 24165c5dad7ba7c7624d05575a5e0cc851396c71
53a85c328dba07d400e92923f28a176ab66aa4fa:
title: 'net: check vlan filter feature in vlan_vids_add_by_dev() and vlan_vids_del_by_dev()'
mainline: 01a564bab4876007ce35f312e16797dfe40e4823
90fe1515b0d0c3b6a6791a8830634866335de049:
title: 'pinctrl: at91-pio4: use dedicated lock class for IRQ'
mainline: 14694179e561b5f2f7e56a0f590e2cb49a9cc7ab
cc34811325d1ae1e3058a8dc3223e53ac7a56879:
title: 'btrfs: do not allow non subvolume root targets for snapshot'
mainline: a8892fd71933126ebae3d60aec5918d4dceaae76
30e342297e0c2ad1593e205b183bc5d023e587e2:
title: 'smb: client: fix OOB in smbCalcSize()'
mainline: b35858b3786ddbb56e1c35138ba25d6adf8d0bef
edaa381535f5630e8a8d28b572eea3c42ff61d19:
title: 'usb: fotg210-hcd: delete an incorrect bounds test'
mainline: 7fbcd195e2b8cc952e4aeaeb50867b798040314c
d276b94d1aa48da69d5128bd20dbbd1ddf4eb432:
title: 'IB/hfi1: Fix sdma.h tx->num_descs off-by-one error'
mainline: e6f57c6881916df39db7d95981a8ad2b9c3458d6
b46a8405794cad6204be3042221a7c21f77d8c62:
title: 'pinctrl: amd: Only use special debounce behavior for GPIO 0'
mainline: 0d5ace1a07f7e846d0f6d972af60d05515599d0b
fdedafcd742d8559f1ad629c189a2536b7db9f46:
title: 'PCI: qcom: Disable write access to read only registers for IP v2.3.3'
mainline: a33d700e8eea76c62120cb3dbf5e01328f18319a
c3ac72987ed13ef8fa08a06df187933552352c4f:
title: 'ASoC: cs42l51: fix driver to properly autoload with automatic module loading'
mainline: e51df4f81b02bcdd828a04de7c1eb6a92988b61e
e90f7b2f28a78446741403c19d0d68863ee0193a:
title: 'x86/topology: Fix erroneous smp_num_siblings on Intel Hybrid platforms'
mainline: edc0a2b5957652f4685ef3516f519f84807087db
52fca5bb0ec336fc31cb97aec94471e9ca07d68c:
title: 'regmap: Account for register length in SMBus I/O limits'
mainline: 0c9d2eb5e94792fe64019008a04d4df5e57625af
416820cdafc7f91f2a0657857eb972a4943bd7a4:
title: 'sched/rt: pick_next_rt_entity(): check list_entry'
mainline: 7c4a5b89a0b5a57a64b601775b296abf77a9fe97
615179d3ecf2bbc4af25755f5eb29f8b42343a23:
title: 'arm64: dts: qcom: msm8996: Add missing interrupt to the USB2 controller'
mainline: 36541089c4733355ed844c67eebd0c3936953454
bfc632c94ee01d6df7f63b34853326caa34903f9:
title: 'MIPS: Alchemy: only build mmc support helpers if au1xmmc is enabled'
mainline: ef8f8f04a0b25e8f294b24350e8463a8d6a9ba0b
1c78ca6a42d34e98bbd22bd5eff7cf1313eae907:
title: 'xen-netback: use default TX queue size for vifs'
mainline: 66cf7435a26917c0c4d6245ad9137e7606e84fdf
e4325866403488f055694d166f5ce40dd78d8ee4:
title: 'r8169: fix the KCSAN reported data-race in rtl_tx while reading TxDescArray[entry].opts1'
mainline: dcf75a0f6bc136de94e88178ae5f51b7f879abc9
89953db2dc4fc004cc9f89585cd614ea925ab64c:
title: 'can: dev: can_restart(): don''t crash kernel if carrier is OK'
mainline: fe5c9940dfd8ba0c73672dddb30acd1b7a11d4c7
c9c68fd8c6b4b1d06691978cdeb6db4e24689038:
title: 'can: dev: can_restart(): fix race condition between controller restart and netif_carrier_on()'
mainline: 6841cab8c4504835e4011689cbdb3351dec693fd
607895f054178a48f6e1b93d9593204cd7ec23d6:
title: 'PCI: keystone: Don''t discard .remove() callback'
mainline: 200bddbb3f5202bbce96444fdc416305de14f547
049e87ba1c0af12f2a20fe643d3279f9f10a9a3e:
title: 'PCI: keystone: Don''t discard .probe() callback'
mainline: 7994db905c0fd692cf04c527585f08a91b560144
50115d1f00090102e4710e783df515d082b16df1:
title: 'ksmbd: fix wrong name of SMB2_CREATE_ALLOCATION_SIZE'
mainline: 13736654481198e519059d4a2e2e3b20fa9fdb3e
5640235350f177b104ab4398e235eb8896f57eb2:
title: 'reset: Fix crash when freeing non-existent optional resets'
mainline: 4a6756f56bcf8e64c87144a626ce53aea4899c0e
af2908904edad8cf9715b47b0e79908a95da3647:
title: 'net/rose: fix races in rose_kill_by_device()'
mainline: 64b8bc7d5f1434c636a40bdcfcd42b278d1714be
69af869a0796b07ba9c970573b10c8973bd6d01f:
title: 'usb: musb: fix MUSB_QUIRK_B_DISCONNECT_99 handling'
mainline: b65ba0c362be665192381cc59e3ac3ef6f0dd1e1
5a4b3f5db7d61d2da0ba29c2c3093c3e2e6396c9:
title: 'Documentation: fix little inconsistencies'
mainline: 98bfa34462fb6af70b845d28561072f80bacdb9b
2913b0ba7f6e2d9cc248bd735c4355521ecfabc9:
title: 'irqchip/jcore-aic: Kill use of irq_create_strict_mappings()'
mainline: 5f8b938bd790cff6542c7fe3c1495c71f89fef1b
3e768cbf9216ce85725737f867956a8f2ca0a457:
title: 'irqchip/jcore-aic: Fix missing allocation of IRQ descriptors'
mainline: 4848229494a323eeaab62eee5574ef9f7de80374
b6e41b5c2379ff1395ca96b6386c05a32843c2a5:
title: 'nfc: constify several pointers to u8, char and sk_buff'
mainline: 3df40eb3a2ea58bf404a38f15a7a2768e4762cb0
45de6db884778ef4e93de4ae7280cd611d884504:
title: 'nfc: llcp: fix possible use of uninitialized variable in nfc_llcp_send_connect()'
mainline: 0d9b41daa5907756a31772d8af8ac5ff25cf17c1
3d857eb0919abf307085ca6832f0ac4d93a24a64:
title: 'hwrng: virtio - add an internal buffer'
mainline: bf3175bc50a3754dc427e2f5046e17a9fafc8be7
278c136276289aa6d3df570c3b062d9fbeedc89b:
title: 'hwrng: virtio - don''t wait on cleanup'
mainline: 2bb31abdbe55742c89f4dc0cc26fcbc8467364f6
97e7381a6f9194c77cd5459f7561193d45a0aa33:
title: 'hwrng: virtio - don''t waste entropy'
mainline: 5c8e933050044d6dd2a000f9a5756ae73cbe7c44
7d1e957a222bb21290e580049099d103ced9fd6a:
title: 'hwrng: virtio - always add a pending request'
mainline: 9a4b612d675b03f7fc9fa1957ca399c8223f3954
c3e52a599f7f62da2ad63e22410984d9aa60e10d:
title: 'hwrng: virtio - Fix race on data_avail and actual data'
mainline: ac52578d6e8d300dd50f790f29a24169b1edd26c
210c0fdcd3a63fbbdd4b235d76dc552da17a1c72:
title: 'i2c: xiic: Fix kerneldoc warnings'
mainline: bcc156e2289d35673928fecf85c798a9d55f0b14
95aabd146e569369fd59671b707d6ad8d51bc72d:
title: 'i2c: xiic: Add timeout to the rx fifo wait loop'
mainline: b4c119dbc300c7a6ee2da70d5c7ba14747b35142
f19338828b5ef9275e53a562253bba42620b8237:
title: 'i2c: xiic: Change code alignment to 1 space only'
mainline: 0a9336ee133deb39f962e16b5eca2a48fec4eb52
bcbf5ebbe44731241b63d63b49fd3f4603d64071:
title: 'i2c: xiic: Fix broken locking on tx_msg'
mainline: c119e7d00c916881913011e6f4c6ac349a41e4e2
afe1470c49177fb9d7144561e1d93f4c5a0d4bfc:
title: 'i2c: xiic: Defer xiic_wakeup() and __xiic_start_xfer() in xiic_process()'
mainline: 743e227a895923c37a333eb2ebf3e391f00c406d
8a0f82d628a2ab065fc339a5089d567ca7a42f49:
title: 'i2c: xiic: Don''t try to handle more interrupt events after error'
mainline: cb6e45c9a0ad9e0f8664fd06db0227d185dc76ab
e30d592833c874df29f606cd7bb8db5ee8a99415:
title: 'net: tcp_input: Neaten DBGUNDO'
mainline: 3934788a7b4df71f8bd7a2a1f1c0480f06a2076e
ee0c076e95ebf74f2ba1d795a624452176bcfbbe:
title: 'net: bcmgenet: Avoid calling platform_device_put() twice in bcmgenet_mii_exit()'
mainline: aa7365e19f8410659ec30503cd8ce866a176c9f4
e97f35bdb99bd53102c39a90471f8fdefa3c2cf4:
title: 'net: bcmgenet: Ensure MDIO unregistration has clocks enabled'
mainline: 1b5ea7ffb7a3bdfffb4b7f40ce0d20a3372ee405
116ca77c4c71f447185ebc9bbe1c71cd4d96c062:
title: 'ceph: define argument structure for handle_cap_grant'
mainline: a1c6b8358171c16db0f858a7fbb28aa574b07c09
fea2d6421e6233a9d7ab4b6017303d72b16aae7d:
title: 'ceph: don''t let check_caps skip sending responses for revoke msgs'
mainline: 257e6172ab36ebbe295a6c9ee9a9dd0fe54c1dc2
c3b84f9003a993627adf8fb99f2141488f56381e:
title: 'net: Replace the limit of TCP_LINGER2 with TCP_FIN_TIMEOUT_MAX'
mainline: f0628c524fd188c3f9418e12478dfdfadacba815
d5a497ae323bb6ab71d7b5eb8b9bae056f920033:
title: 'tcp: annotate data-races around tp->linger2'
mainline: 9df5335ca974e688389c875546e5819778a80d59
cd432e4c1b933a42f95ba14762afbcb1f932a9c0:
title: 'ext4: rename journal_dev to s_journal_dev inside ext4_sb_info'
mainline: ee7ed3aa0f08621dbf897d2a98dc6f2c7e7d0335
3cacee45e38c0c293d4b4e8b44eaa0ffaf503f72:
title: 'ext4: Fix reusing stale buffer heads from last failed mounting'
mainline: 26fb5290240dc31cae99b8b4dd2af7f46dfcba6b
e6dd1522cb62c19dd5d4131710f9d2992548efdf:
title: 'dlm: cleanup plock_op vs plock_xop'
mainline: bcbb4ba6c9ba81e6975b642a2cade68044cd8a66
f6f6a0dec0c6cb62d700e6a1b979b67be8d1ef06:
title: 'dlm: rearrange async condition return'
mainline: a800ba77fd285c6391a82819867ac64e9ab3af46
95ba3c92f307d3017a4cad2ac37cb94e3ed6e799:
title: 'fs: dlm: interrupt posix locks only when process is killed'
mainline: 59e45c758ca1b9893ac923dd63536da946ac333b
a8a61debd3e4dd682b5661b5dd0230622bec22ed:
title: 'btrfs: simplify IS_ERR/PTR_ERR checks'
mainline: 8d9e220ca0844bf75b98cb5b8e2c25d203c0d0f6
aa11e1e3cda3cf7e6b8664b93d5af3eaaad8a98d:
title: 'btrfs: check for commit error at btrfs_attach_transaction_barrier()'
mainline: b28ff3a7d7e97456fd86b68d24caa32e1cfa7064
eff17aaaaced7877e833d19a0bc679651632c109:
title: 'scsi: zfcp: workqueue: set description for port work items with their WWPN as context'
mainline: 5c750d58e9d78987e2bda6b65441e6f6b961a01e
550d8906f7a311955da9e7ec57e92a79ad9b1044:
title: 'scsi: zfcp: Defer fc_rport blocking until after ADISC response'
mainline: e65851989001c0c9ba9177564b13b38201c0854c
b54ef4914a396bf6da749e33036fab3b2a84368e:
title: 'PM / wakeirq: support enabling wake-up irq after runtime_suspend called'
mainline: 259714100d98b50bf04d36a21bf50ca8b829fc11
16853250e57a8a89839d7f9a5628f433a9c9164f:
title: 'PM: sleep: wakeirq: fix wake irq arming'
mainline: 8527beb12087238d4387607597b4020bc393c4b4
d27937e2279cf13e7b7c57ff5c0a6636ca6eea50:
title: 'mmc: meson-gx: remove useless lock'
mainline: 83076d2268c72d123f3d1eaf186a9f56ec1b943a
c6bedc5607e55567e777c28af26c57303b08e129:
title: 'mmc: meson-gx: remove redundant mmc_request_done() call from irq context'
mainline: 3c40eb8145325b0f5b93b8a169146078cb2c49d6
70868b0c559cc25c2a841c113293a6afc4538d01:
title: 'nfsd4: kill warnings on testing stateids with mismatched clientids'
mainline: 663e36f07666ff924012defa521f88875f6e5402
bdc1664459d8064eb02ddb76c315dafb6b571f0e:
title: 'nfsd: Remove incorrect check in nfsd4_validate_stateid'
mainline: f75546f58a70da5cfdcec5a45ffc377885ccbee8
a56312f49990de12ec7b3d966bbf8644d89b8187:
title: 'dlm: improve plock logging if interrupted'
mainline: bcfad4265cedf3adcac355e994ef9771b78407bd
10d746f6180ca8a78a5f370181ad6c14f587f0dc:
title: 'dlm: replace usage of found with dedicated list iterator variable'
mainline: dc1acd5c94699389a9ed023e94dd860c846ea1f6
127999b1f810d0421aa7e553e9cdcde318b1887f:
title: 'fs: dlm: add pid to debug log'
mainline: 19d7ca051d303622c423b4cb39e6bde5d177328b
e6bfc1367e60133389f38b4f8e6a0639a27bea78:
title: 'fs: dlm: change plock interrupted message to debug again'
mainline: ea06d4cabf529eefbe7e89e3a8325f1f89355ccd
0f6305457b27e01b6a5e4f486d3f49c34bab496e:
title: 'fs: dlm: use dlm_plock_info for do_unlock_close'
mainline: 4d413ae9ced4180c0e2114553c3a7560b509b0f8
0b0e05f21ee5f823cc062c2df8a27f292c56007c:
title: 'fs: dlm: fix mismatch of plock results from userspace'
mainline: 57e2c2f2d94cfd551af91cedfa1af6d972487197
050f94118e4753e12230982cfc021116d37764c6:
title: 'MIPS: cpu-features: Enable octeon_cache by cpu_type'
mainline: f641519409a73403ee6612b8648b95a688ab85c2
129c199aa0d9d5d4e0c1c784c21e0cb9de98c738:
title: 'MIPS: cpu-features: Use boot_cpu_type for CPU type based features'
mainline: 5487a7b60695a92cf998350e4beac17144c91fcd
ae7ad73420b84636f3c3ef56ab3395e443de7bc8:
title: 'Revert "tty: serial: fsl_lpuart: drop earlycon entry for i.MX8QXP"'
mainline: 4e9679738a918d8a482ac6a2cb2bb871f094bb84
38c48e8208a3aa72af922e6066fff0ccc59059f6:
title: 'tty: serial: fsl_lpuart: add earlycon for imx8ulp platform'
mainline: e0edfdc15863ec80a1d9ac6e174dbccc00206dd0
790ba10b696a0644d0d674ccfada612841160654:
title: 'fbdev: Improve performance of sys_imageblit()'
mainline: 6f29e04938bf509fccfad490a74284cf158891ce
000e6839ff5a15a30db490549ac2a4b6fd7897f7:
title: 'fbdev: Fix sys_imageblit() for arbitrary image widths'
mainline: 61bfcb6a3b981e8f19e044ac8c3de6edbe6caf70
fd2daf94457d373bfc3bec7848c016f7eda8437d:
title: 'fbdev: fix potential OOB read in fast_imageblit()'
mainline: c2d22806aecb24e2de55c30a06e5d6eb297d161d
a975f7d7c8a9b294e140eb3ee7decc85a1eef8cf:
title: 'net: remove bond_slave_has_mac_rcu()'
mainline: 8b0fdcdc3a7d44aff907f0103f5ffb86b12bfe71
7fb02a7f7d83ee430f670597abb8d6a6d10f0594:
title: 'bonding: fix macvlan over alb bond support'
mainline: e74216b8def3803e98ae536de78733e9d7f3b109
43a1a81dc41cc370c7d9d22d9643a2aafcd70416:
title: 'mwifiex: drop ''set_consistent_dma_mask'' log message'
mainline: f7369179ad32000973fc7a0a76603e0b41792b52
7a73825cdd948a89445ff03f858517d49ef06194:
title: 'mwifiex: switch from ''pci_'' to ''dma_'' API'
mainline: 4cf975f640fefdfdf6168a79e882558478ce057a
e32af8cb49f42d5ab30497922feca00033a72966:
title: 'wifi: mwifiex: fix error recovery in PCIE buffer descriptor management'
mainline: 288c63d5cb4667a51a04668b3e2bb0ea499bc5f4
aa5a8b2a894434b1369882e2fd19cd63508998fc:
title: 'ath9k: use irqsave() in USB''s complete callback'
mainline: 84a0d4669c8fdbe6e3e23937c5083af99a1946f2
38af08a8cd605518c36e54d4d2b1d60cffc3bfd4:
title: 'wifi: ath9k: fix races between ath9k_wmi_cmd and ath9k_wmi_ctrl_rx'
mainline: b674fb513e2e7a514fcde287c0f73915d393fdb6
07da16c6e55bc394a29ba4f4da176e96d9ace43d:
title: 'ARM: dts: BCM5301X: Harmonize EHCI/OHCI DT nodes name'
mainline: 74abbfe99f43eb7466d26d9e48fbeb46b8f3d804
73b5bae64a17666626cf1ced507adb32e1a36609:
title: 'ARM: dts: BCM53573: Drop nonexistent #usb-cells'
mainline: 05d2c3d552b8c92fc397377d9d1112fc58e2cd59
067937ca477d5457ee6b6ec835436a3b27ac2e8e:
title: 'drm/tegra: Remove superfluous error messages around platform_get_irq()'
mainline: d12919bb5da571ec50588ef97683d37e36dc2de5
5bf51969adad6e4fd5a282b05a71cb2351e400ea:
title: 'drm/tegra: dpaux: Fix incorrect return value of platform_get_irq'
mainline: 2a1ca44b654346cadfc538c4fb32eecd8daf3140
5e192852448d0cec109afdff412214de340d34e8:
title: 'dlm: fix plock lookup when using multiple lockspaces'
mainline: 7c53e847ff5e97f033fdd31f71949807633d506b
8b6eebfbe7e2405bb0024e56b604d9ca3782eaaa:
title: 'sc16is7xx: Set iobase to device index'
mainline: 5da6b1c079e6804a81e63ab8337224cbd2148c91
a86abf7ed3e585714e068e259ef50800027bb467:
title: 'serial: sc16is7xx: fix broken port 0 uart init'
mainline: 2861ed4d6e6d1a2c9de9bf5b0abd996c2dc673d0
8e8afe4f1480bc9982913b2258a6b1648dfae121:
title: 'staging: typec: tcpm: Document data structures'
mainline: 98076fa64a05dd2bda3a9f38e171bade15ab507d
34c3c637cad40733d10132f69f9cc2debd66e33e:
title: 'staging: typec: fix endianness mismatch identified by sparse'
mainline: 81948cbcf12cb16a6f5fad9c86233a596e2f47ab
19fac3944e5c31df98b276221f2ebbccc1b6d415:
title: 'usb: typec: add fwnode to tcpc'
mainline: 5e85a04c8c0d271d7561a770b85741f186398868
f62619a3cf799ec41e764b406bbdaf660cc332f5:
title: 'usb: typec: tcpci: clear the fault status bit'
mainline: 23e60c8daf5ec2ab1b731310761b668745fcf6ed
a14cb6e28662143de003bde6a3a485bda15f0507:
title: 'scsi: lpfc: remove redundant null check on eqe'
mainline: 858e51e8cbe11a8c59b24aaf4cb40f7f4e7a2feb
47e59bc230b9b9cabac05b6b9c4ee937d25e3663:
title: 'scsi: qla2xxx: Reinstate module parameter ql2xenablemsix'
mainline: e7240af5108fc8b068b1b21988e48f0c5005cae6
0b48bb34020820ef4cc3a1db955cb8da14378547:
title: 'scsi: qla2xxx: Add option for use reserve exch for ELS'
mainline: 9ecf0b0dd5b934a89eeaa15723d10beb6c33074c
04b6abf7d75816455738721e49e3ee2e7e1cf2cf:
title: 'scsi: qla2xxx: Add protection mask module parameters'
mainline: 7855d2ba1172d716d96a628af7c5bafa5725ac57
fee054173692099ed2621b3b40631837db73a0be:
title: 'scsi: qla2xxx: Remove unsupported ql2xenabledif option'
mainline: e9105c4b7a9208a21a9bda133707624f12ddabc2
4b3db74257bb92b7e865c56462749fb0885ba92b:
title: 'ext4: remove the ''group'' parameter of ext4_trim_extent'
mainline: bd2eea8d0a6b6a9aca22f20bf74f73b71d8808af
9dd4bb24589fa1264649a51dcab190a086a40b2a:
title: 'ext4: add new helper interface ext4_try_to_trim_range()'
mainline: 6920b3913235f517728bb69abe9b39047a987113
c1932c2190bc4572ef8ab809bf8ca96d5cb3e963:
title: 'ext4: mark group as trimmed only if it was fully scanned'
mainline: d63c00ea435a5352f486c259665a4ced60399421
12056cb85b8326928109f708af452e9a5e67b08d:
title: 'ALSA: hda: Add Intel NUC7i3BNB to the power_save blacklist'
mainline: dd6dd5365404a31292715e6f54184f47f9b6aca5
983c8163037c25396f4705649175f03a8655b2ec:
title: 'ALSA: hda - add Lenovo IdeaCentre B550 to the power_save_blacklist'
mainline: 721f1e6c1fd137e7e2053d8e103b666faaa2d50c
17021e3657fa24eb0c8fceaacdde318962cd0dbe:
title: 'ALSA: hda: Disable power save for solving pop issue on Lenovo ThinkCentre M70q'
mainline: 057a28ef93bdbe84326d34cdb5543afdaab49fe1
a3a38c97cc8a24d335da2ab40c1f41bd90f4e6e3:
title: 'libata: Add new med_power_with_dipm link_power_management_policy setting'
mainline: f4ac6476945ff62939420bcf8266e39f8d5d54bd
7fc967f723e63bfb05694a5042aa7a931740faaa:
title: 'libata: make ata_port_type const'
mainline: 8df82c13a3756f831b0d748226ce932515728e04
543828f9c9d5194b02dd0f9148a36e1959f44786:
title: 'ata: libata-core: Do not register PM operations for SAS ports'
mainline: 75e2bd5f1ede42a2bc88aa34b431e1ace8e0bea0
481af7a82bed32501d1db0d13469032db4bd6844:
title: 'net: nfc: fix races in nfc_llcp_sock_get() and nfc_llcp_sock_get_sn()'
mainline: 31c07dffafce914c1d1543c135382a11ff058d93
759b99e2744b81d7c570679a8055bf3b9ce8e23a:
title: 'tcp: Namespace-ify sysctl_tcp_early_retrans'
mainline: 2ae21cf527da0e5cf9d7ee14bd5b0909bb9d1a75
5069afc313ab712506eb20ea71ba74f4fc9fe69c:
title: 'tcp: fix excessive TLP and RACK timeouts from HZ rounding'
mainline: 1c2709cfff1dedbb9591e989e2f001484208d914
9908f81a7a4d69b2cab427061bbe8270e4ee9ec4:
title: 'tcp: batch tcp_net_metrics_exit'
mainline: 789e6ddb0b2fb5d5024b760b178a47876e4de7a6
96f264346322c9e8e8fcd1c2309484e63b63b994:
title: 'tcp_metrics: add missing barriers on delete'
mainline: cbc3a153222805d65f821e10f4f78b6afce06f86
f07ac9ebc8bb931f7d3f44d21a0005f329fab7d8:
title: 'regmap: Allow missing device in regmap_name_read_file()'
mainline: 12ae3808c160b7be0de3c633ac4cbe8c5f2937bf
08e96d4f68e8ac7d926c5cbc681f8a51c395d55a:
title: 'regmap: debugfs: Fix a erroneous check after snprintf()'
mainline: d3601857e14de6369f00ae19564f1d817d175d19
926415a94426a5f06cae0bf580991003ad6cc541:
title: 'leds: pwm: simplify if condition'
mainline: b43a8f01fccbfdddbc7f9b2bbad11b7db3fda4e1
e1f59ea64f2ccca230bac4ab7735707479719591:
title: 'leds: pwm: convert to atomic PWM API'
mainline: dd47a83453e4a5b0d6a91fe702b7fbc1984fb610
99196523a0932a647a11e106f7b275ed5eb116bb:
title: 'leds: pwm: Don''t disable the PWM when the LED should be off'
mainline: 76fe464c8e64e71b2e4af11edeef0e5d85eeb6aa
d227179970abfcde03c773743472af8fef544327:
title: 'ledtrig-cpu: Limit to 8 CPUs'
mainline: abcc131292aa8c7de2c5f0ed76a717436c21de63
eaf18b187c77c62754fb9a79704b1c9f39289058:
title: 'leds: trigger: ledtrig-cpu:: Fix ''output may be truncated'' issue for ''cpu'''
mainline: ff50f53276131a3059e8307d11293af388ed2bcd
2a3d11b71743d93a48fb1fca8b6a5b50c43b6a5f:
title: 'tools: iio: privatize globals and functions in iio_generic_buffer.c file'
mainline: ebe5112535b5cf389ca7d337cf6a0c1d885f9880
ff9a3a01f25e1bed4d58ac67e97d7838d97e5055:
title: 'tools: iio: iio_generic_buffer: Fix some integer type and calculation'
mainline: 49d736313d0975ddeb156f4f59801da833f78b30
d84e1a8ae24bda75c5d07633c00957a31e762cf2:
title: 'tools: iio: iio_generic_buffer ensure alignment'
mainline: 2d3dff577dd0ea8fe9637a13822f7603c4a881c8
88750343019d107c221ac35bfcdf0712acca028a:
title: 'pwm: sti: Avoid conditional gotos'
mainline: fd3ae02bb66f091e55f363d32eca7b4039977bf5
a2afa76cd56cb36cce6df2844d1bd50fe7cd3326:
title: 'pwm: sti: Reduce number of allocations and drop usage of chip_data'
mainline: 2d6812b41e0d832919d72c72ebddf361df53ba1b
1167e186dab9a6e4f145c0200c945e288dc8915e:
title: 'hv_netvsc: use reciprocal divide to speed up percent calculation'
mainline: a7f99d0f2bbfe3b42ce398cdd37a97762e72cb56
e72642bece884f0adc7e84170b0cd593ec099dc3:
title: 'hv_netvsc: Fix race of register_netdevice_notifier and VF register'
mainline: 85520856466ed6bc3b1ccb013cddac70ceb437db
5efc9b330da9c5d031ff4a2c9ea440b8a4763a4d:
title: 'ALSA: hda/realtek - Add support for ALC1220'
mainline: 0202f5cd9aab127355f6b1de74058a670424d48a
717887a04ab8cc073b017de117f49a6f7561fc86:
title: 'ALSA: hda/realtek - Clevo P950ER ALC1220 Fixup'
mainline: 2f0d520a1a73555ac51c19cd494493f60b4c1cea
7c1753de173dac57ee577b2879d785c3f50ac923:
title: 'ALSA: hda/realtek - Headset microphone and internal speaker support for System76 oryp5'
mainline: 7f665b1c3283aae5b61843136d0a8ee808ba3199
343b68d0d5deba6a0a5ac71c07bbbca09996bc67:
title: 'ALSA: hda/realtek - Add quirk for Tuxedo XC 1509'
mainline: 80690a276f444a68a332136d98bfea1c338bc263
c05c727992661edfc7a751a2c548cbdf4b5b1445:
title: 'ALSA: hda/realtek: Enable audio jacks of ASUS D700SA with ALC887'
mainline: ca184355db8e60290fa34bf61c13308e6f4f50d3
695490e53275fe3e3ec200eaa23ea9a12480a960:
title: 'ALSA: hda/realtek - Fix microphone noise on ASUS TUF B550M-PLUS'
mainline: 9bfa7b36343c7d84370bc61c9ed774635b05e4eb
6ef8acf68b6eca9bc92f3245661611cade8c2803:
title: 'hfsplus: unmap the page in the "fail_page" label'
mainline: f5b23d6704e478b5a97dbba5df9dea96a9cbf847
ad3319dd44140ca9ce30a141ef6554eba430567e:
title: 'ALSA: hda/realtek: Headset Mic VREF to 100%'
mainline: baaacbff64d9f34b64f294431966d035aeadb81c
2d92e5282406723408007792521bae223b4957ab:
title: 's390/mm: fix phys vs virt confusion in mark_kernel_pXd() functions family'
mainline: 3784231b1e091857bd129fd9658a8b3cedbdcd58
617738d9f53ecf196289eff667689692ea688992:
title: 's390/cmma: fix detection of DAT pages'
mainline: 44d93045247661acbd50b1629e62f415f2747577
c3b3e5c5fd6af9babc0ae2c40f8dedcb5a971246:
title: 'mtd: cfi_cmdset_0001: Support the absence of protection registers'
mainline: b359ed5184aebf9d987e54abc5dae7ac03ed29ae
2d6c830b5b74502a3489e71649bed1a5aa3a0126:
title: 'mtd: cfi_cmdset_0001: Byte swap OTP info'
mainline: 565fe150624ee77dc63a735cc1b3bff5101f38a3
aaeb68749011877e3f27f12a2074030416c6e87b:
title: 'netfilter: xt_owner: Add supplementary groups option'
mainline: ea6cc2fd8a2b89ab6dcd096ba6dbc1ecbdf26564
c5bb4c9e5197e11029e4e0139bd9f3b418583b8c:
title: 'netfilter: xt_owner: Fix for unsafe access of sk->sk_socket'
mainline: 7ae836a3d630e146b732fe8ef7d86b243748751f
704e90cd5aea7b3d54272c79f4426c779c6b6206:
title: 'devcoredump : Serialize devcd_del work'
mainline: 01daccf748323dfc61112f474cf2ba81015446b0
e54af988b2b852fdd3e49a71b74d9f390a3ee481:
title: 'devcoredump: Send uevent once devcd is ready'
mainline: af54d778a03853801d681c98c0c2a6c316ef9ca7
cf43b6d64c1240c2477e73960d176a4409fafa96:
title: Add Acer Aspire Ethos 8951G model quirk
mainline: 00066e9733f629e536f6b7957de2ce11a85fe15a
5b475c173f7dbe4a7d4d30b532fe625a48789159:
title: 'ALSA: hda/realtek - More constifications'
mainline: 6b0f95c49d890440c01a759c767dfe40e2acdbf2
52db37c601def3669f3ee12815fad45141c18df6:
title: 'ALSA: hda/realtek - Add Headset Mic supported for HP cPC'
mainline: 5af29028fd6db9438b5584ab7179710a0a22569d
1c02322babbd2995262bc53b701915ae21e11822:
title: 'ALSA: hda/realtek - Enable headset mic of Acer X2660G with ALC662'
mainline: d858c706bdca97698752bd26b60c21ec07ef04f2
8d8b693c3b4a5f1c7888bb772761b0ce2d8ec1e1:
title: 'ALSA: hda/realtek - Enable the headset of Acer N50-600 with ALC662'
mainline: a124458a127ccd7629e20cd7bae3e1f758ed32aa
7f6644d4ab24b8185cac62d3dc4e98c3442c32aa:
title: 'ALSA: hda/realtek - The front Mic on a HP machine doesn''t work'
mainline: 148ebf548a1af366fc797fcc7d03f0bb92b12a79
a12a6438dd344448ea3ed430f6304b5179c07e0d:
title: 'ALSA: hda/realtek: Fix the mic type detection issue for ASUS G551JW'
mainline: a3fd1a986e499a06ac5ef95c3a39aa4611e7444c
0cef3d6365178cae90fa6873bd24e123c4991b79:
title: 'ALSA: hda/realtek - Add headset Mic support for Lenovo ALC897 platform'
mainline: d7f32791a9fcf0dae8b073cdea9b79e29098c5f4
9c603c45f7a062f3f95cf63f2bd30656aa7e4869:
title: 'ALSA: hda/realtek - ALC897 headset MIC no sound'
mainline: fe6900bd8156467365bd5b976df64928fdebfeb0
2b52c1d89d88fe46c263396d6a03bdaa946f80ab:
title: 'ALSA: hda/realtek: Add quirk for Lenovo TianYi510Pro-14IOB'
mainline: 4bf5bf54476dffe60e6b6d8d539f67309ff599e2
c3378d349af0b3cfa731f780ce4f4a5b32e98326:
title: 'ALSA: hda/realtek: Enable headset onLenovo M70/M90'
mainline: 4ca110cab46561cd74a2acd9b447435acb4bec5f
86d3937af58b1883a4864c1920dc1f0a94d1ec1a:
title: 'ALSA: hda/realtek: Enable headset on Lenovo M90 Gen5'
mainline: 6f7e4664e597440dfbdb8b2931c561b717030d07
97ad753fc2535621640dceb6901fee3a189579b0:
title: 'staging: android: ashmem: Remove use of unlikely()'
mainline: 59848d6aded59a644bd3199033a9dc5a66d528f5
cc4df094fab7ff5c72080d3139993b593e2eecc0:
title: 'net: warn if gso_type isn''t set for a GSO SKB'
mainline: 1d155dfdf50efc2b0793bce93c06d1a5b23d0877
2dd12d177fc0e880d57f29006e9789827505ef32:
title: 'driver: staging: count ashmem_range into SLAB_RECLAIMBLE'
mainline: 3989f5a5f81c97732f9e3b3ae2d1d7923f6e7653
c1b444cfadfcaa4febb4cd7c2485c7190b26cd21:
title: 'net: check dev->gso_max_size in gso_features_check()'
mainline: 24ab059d2ebd62fdccc43794796f6ffbabe49ebc
c79ed0e007099e62a54ac8cf46ef510d539eeb85:
title: 'scsi: bnx2fc: Remove set but not used variable ''oxid'''
mainline: efcbe99818ac9bd93ac41e8cf954e9aa64dd9971
041b7f92ebe014ec914efa59e83f72ab1dcbd335:
title: 'scsi: bnx2fc: Fix skb double free in bnx2fc_rcv()'
mainline: 08c94d80b2da481652fb633e79cbc41e9e326a91
abd2c4dd779190715bddb438c4cd90a8ce61fe7f:
title: 'ANDROID: binder: Add thread->process_todo flag.'
mainline: 148ade2c4d4f46b3ecc1ddad1c762371e8708e35
aaf0101b79c4375c4eafff78d1e4887b273681b2:
title: 'binder: signal epoll threads of self-work'
mainline: 97830f3c3088638ff90b20dfba2eb4d487bf14d7

View File

@@ -94,10 +94,10 @@ Note: More extensive information for getting started with ext4 can be
* ability to pack bitmaps and inode tables into larger virtual groups via the
flex_bg feature
* large file support
* Inode allocation using large virtual block groups via flex_bg
* inode allocation using large virtual block groups via flex_bg
* delayed allocation
* large block (up to pagesize) support
* efficient new ordered mode in JBD2 and ext4(avoid using buffer head to force
* efficient new ordered mode in JBD2 and ext4 (avoid using buffer head to force
the ordering)
[1] Filesystems with a block size of 1k may see a limit imposed by the
@@ -105,7 +105,7 @@ directory hash tree having a maximum depth of two.
2.2 Candidate features for future inclusion
* Online defrag (patches available but not well tested)
* online defrag (patches available but not well tested)
* reduced mke2fs time via lazy itable initialization in conjunction with
the uninit_bg feature (capability to do this is available in e2fsprogs
but a kernel thread to do lazy zeroing of unused inode table blocks
@ -602,7 +602,7 @@ Table of Ext4 specific ioctls
bitmaps and inode table, the userspace tool thus
just passes the new number of blocks.
EXT4_IOC_SWAP_BOOT Swap i_blocks and associated attributes
EXT4_IOC_SWAP_BOOT Swap i_blocks and associated attributes
(like i_blocks, i_size, i_flags, ...) from
the specified inode with inode
EXT4_BOOT_LOADER_INO (#5). This is typically

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 343
SUBLEVEL = 344
EXTRAVERSION =
NAME = Petit Gorille

View File

@ -267,7 +267,7 @@
interrupt-parent = <&gic>;
ehci: ehci@21000 {
ehci: usb@21000 {
#usb-cells = <0>;
compatible = "generic-ehci";
@ -289,7 +289,7 @@
};
};
ohci: ohci@22000 {
ohci: usb@22000 {
#usb-cells = <0>;
compatible = "generic-ohci";

View File

@ -130,7 +130,7 @@
#address-cells = <1>;
#size-cells = <1>;
ehci: ehci@4000 {
ehci: usb@4000 {
compatible = "generic-ehci";
reg = <0x4000 0x1000>;
interrupt-parent = <&gic>;
@ -150,9 +150,7 @@
};
};
ohci: ohci@d000 {
#usb-cells = <0>;
ohci: usb@d000 {
compatible = "generic-ohci";
reg = <0xd000 0x1000>;
interrupt-parent = <&gic>;

View File

@ -48,7 +48,7 @@ enum probes_insn checker_stack_use_imm_0xx(probes_opcode_t insn,
* Different from other insn uses imm8, the real addressing offset of
* STRD in T32 encoding should be imm8 * 4. See ARMARM description.
*/
enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
static enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
struct arch_probes_insn *asi,
const struct decode_header *h)
{

View File

@ -244,7 +244,7 @@ singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
* kprobe, and that level is reserved for user kprobe handlers, so we can't
* risk encountering a new kprobe in an interrupt handler.
*/
void __kprobes kprobe_handler(struct pt_regs *regs)
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur;
struct kprobe_ctlblk *kcb;

View File

@ -158,8 +158,6 @@ __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
}
}
extern void kprobe_handler(struct pt_regs *regs);
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{

View File

@ -780,7 +780,7 @@ static const char coverage_register_lookup[16] = {
[REG_TYPE_NOSPPCX] = COVERAGE_ANY_REG | COVERAGE_SP,
};
unsigned coverage_start_registers(const struct decode_header *h)
static unsigned coverage_start_registers(const struct decode_header *h)
{
unsigned regs = 0;
int i;

View File

@ -456,3 +456,7 @@ void kprobe_thumb32_test_cases(void);
#else
void kprobe_arm_test_cases(void);
#endif
void __kprobes_test_case_start(void);
void __kprobes_test_case_end_16(void);
void __kprobes_test_case_end_32(void);

View File

@ -51,7 +51,7 @@
id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
};
usb_p1_vbus: regulator@0 {
usb_p1_vbus: regulator-usb-p1 {
compatible = "regulator-fixed";
regulator-name = "usb_vbus";
regulator-min-microvolt = <5000000>;
@ -60,7 +60,7 @@
enable-active-high;
};
usb_p0_vbus: regulator@1 {
usb_p0_vbus: regulator-usb-p0 {
compatible = "regulator-fixed";
regulator-name = "vbus";
regulator-min-microvolt = <5000000>;

View File

@ -769,6 +769,9 @@
#size-cells = <1>;
ranges;
interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hs_phy_irq";
clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
<&gcc GCC_USB20_MASTER_CLK>,
<&gcc GCC_USB20_MOCK_UTMI_CLK>,

View File

@ -172,6 +172,7 @@ static struct platform_device db1x00_audio_dev = {
/******************************************************************************/
#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(500));
@ -379,6 +380,7 @@ static struct platform_device db1100_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1100_mmc1_res),
.resource = au1100_mmc1_res,
};
#endif /* CONFIG_MMC_AU1X */
/******************************************************************************/
@ -482,9 +484,11 @@ static struct platform_device *db1000_devs[] = {
static struct platform_device *db1100_devs[] = {
&au1100_lcd_device,
#ifdef CONFIG_MMC_AU1X
&db1100_mmc0_dev,
&db1100_mmc1_dev,
&db1000_irda_dev,
#endif
};
int __init db1000_dev_setup(void)

View File

@ -341,6 +341,7 @@ static struct platform_device db1200_ide_dev = {
/**********************************************************************/
#ifdef CONFIG_MMC_AU1X
/* SD carddetects: they're supposed to be edge-triggered, but ack
* doesn't seem to work (CPLD Rev 2). Instead, the screaming one
* is disabled and its counterpart enabled. The 200ms timeout is
@ -601,6 +602,7 @@ static struct platform_device pb1200_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1200_mmc1_res),
.resource = au1200_mmc1_res,
};
#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@ -768,7 +770,9 @@ static struct platform_device db1200_audiodma_dev = {
static struct platform_device *db1200_devs[] __initdata = {
NULL, /* PSC0, selected by S6.8 */
&db1200_ide_dev,
#ifdef CONFIG_MMC_AU1X
&db1200_mmc0_dev,
#endif
&au1200_lcd_dev,
&db1200_eth_dev,
&db1200_nand_dev,
@ -779,7 +783,9 @@ static struct platform_device *db1200_devs[] __initdata = {
};
static struct platform_device *pb1200_devs[] __initdata = {
#ifdef CONFIG_MMC_AU1X
&pb1200_mmc1_dev,
#endif
};
/* Some peripheral base addresses differ on the PB1200 */

View File

@ -448,6 +448,7 @@ static struct platform_device db1300_ide_dev = {
/**********************************************************************/
#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
@ -626,6 +627,7 @@ static struct platform_device db1300_sd0_dev = {
.resource = au1300_sd0_res,
.num_resources = ARRAY_SIZE(au1300_sd0_res),
};
#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@ -756,8 +758,10 @@ static struct platform_device *db1300_dev[] __initdata = {
&db1300_5waysw_dev,
&db1300_nand_dev,
&db1300_ide_dev,
#ifdef CONFIG_MMC_AU1X
&db1300_sd0_dev,
&db1300_sd1_dev,
#endif
&db1300_lcd_dev,
&db1300_ac97_dev,
&db1300_i2s_dev,

View File

@ -73,7 +73,24 @@
#define cpu_has_tx39_cache (cpu_data[0].options & MIPS_CPU_TX39_CACHE)
#endif
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache 0
#define cpu_has_octeon_cache \
({ \
int __res; \
\
switch (boot_cpu_type()) { \
case CPU_CAVIUM_OCTEON: \
case CPU_CAVIUM_OCTEON_PLUS: \
case CPU_CAVIUM_OCTEON2: \
case CPU_CAVIUM_OCTEON3: \
__res = 1; \
break; \
\
default: \
__res = 0; \
} \
\
__res; \
})
#endif
/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
#ifndef cpu_has_fpu
@ -294,7 +311,7 @@
({ \
int __res; \
\
switch (current_cpu_type()) { \
switch (boot_cpu_type()) { \
case CPU_M14KC: \
case CPU_74K: \
case CPU_1074K: \

View File

@ -701,7 +701,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
pte_t *ptep, entry, old_pte;
pte_t *ptep, entry;
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
@ -774,7 +774,6 @@ retry:
entry = pfn_pte(pfn, __pgprot(prot_bits));
/* Write the PTE */
old_pte = *ptep;
set_pte(ptep, entry);
err = 0;

View File

@ -118,7 +118,7 @@ static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) || pmd_large(*pmd))
continue;
page = virt_to_page(pmd_val(*pmd));
page = phys_to_page(pmd_val(*pmd));
set_bit(PG_arch_1, &page->flags);
} while (pmd++, addr = next, addr != end);
}
@ -136,8 +136,8 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
if (pud_none(*pud) || pud_large(*pud))
continue;
if (!pud_folded(*pud)) {
page = virt_to_page(pud_val(*pud));
for (i = 0; i < 3; i++)
page = phys_to_page(pud_val(*pud));
for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_pmd(pud, addr, next);
@ -157,8 +157,8 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
if (p4d_none(*p4d))
continue;
if (!p4d_folded(*p4d)) {
page = virt_to_page(p4d_val(*p4d));
for (i = 0; i < 3; i++)
page = phys_to_page(p4d_val(*p4d));
for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_pud(p4d, addr, next);
@ -179,8 +179,8 @@ static void mark_kernel_pgd(void)
if (pgd_none(*pgd))
continue;
if (!pgd_folded(*pgd)) {
page = virt_to_page(pgd_val(*pgd));
for (i = 0; i < 3; i++)
page = phys_to_page(pgd_val(*pgd));
for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_p4d(pgd, addr, next);

View File

@ -49,7 +49,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
* initial apic id, which also represents 32-bit extended x2apic id.
*/
c->initial_apicid = edx;
smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
#endif
return 0;
}
@ -73,7 +73,8 @@ void detect_extended_topology(struct cpuinfo_x86 *c)
* Populate HT related information from sub-leaf level 0.
*/
cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
sub_index = 1;

View File

@ -32,7 +32,8 @@ int amiga_partition(struct parsed_partitions *state)
unsigned char *data;
struct RigidDiskBlock *rdb;
struct PartitionBlock *pb;
int start_sect, nr_sects, blk, part, res = 0;
sector_t start_sect, nr_sects;
int blk, part, res = 0;
int blksize = 1; /* Multiplier for disk block size */
int slot = 1;
char b[BDEVNAME_SIZE];
@ -100,14 +101,14 @@ int amiga_partition(struct parsed_partitions *state)
/* Tell Kernel about it */
nr_sects = (be32_to_cpu(pb->pb_Environment[10]) + 1 -
be32_to_cpu(pb->pb_Environment[9])) *
nr_sects = ((sector_t)be32_to_cpu(pb->pb_Environment[10]) + 1 -
be32_to_cpu(pb->pb_Environment[9])) *
be32_to_cpu(pb->pb_Environment[3]) *
be32_to_cpu(pb->pb_Environment[5]) *
blksize;
if (!nr_sects)
continue;
start_sect = be32_to_cpu(pb->pb_Environment[9]) *
start_sect = (sector_t)be32_to_cpu(pb->pb_Environment[9]) *
be32_to_cpu(pb->pb_Environment[3]) *
be32_to_cpu(pb->pb_Environment[5]) *
blksize;

View File

@ -876,6 +876,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
struct binder_work *work)
{
binder_enqueue_work_ilocked(work, &thread->todo);
/* (e)poll-based threads require an explicit wakeup signal when
* queuing their own work; they rely on these events to consume
* messages without I/O block. Without it, threads risk waiting
* indefinitely without handling the work.
*/
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
thread->pid == current->pid && !thread->process_todo)
wake_up_interruptible_sync(&thread->wait);
thread->process_todo = true;
}

View File

@ -3992,6 +3992,7 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
scontrol &= ~(0x1 << 8);
scontrol |= (0x6 << 8);
break;
case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER:
if (ata_link_nr_enabled(link) > 0) {
/* assume no restrictions on LPM transitions */
@ -5911,8 +5912,8 @@ void ata_host_resume(struct ata_host *host)
}
#endif
struct device_type ata_port_type = {
.name = "ata_port",
const struct device_type ata_port_type = {
.name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif

View File

@ -3462,9 +3462,9 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
* @r_failed_dev: out parameter for failed device
*
* Enable SATA Interface power management. This will enable
* Device Interface Power Management (DIPM) for min_power
* policy, and then call driver specific callbacks for
* enabling Host Initiated Power management.
* Device Interface Power Management (DIPM) for min_power and
* medium_power_with_dipm policies, and then call driver specific
* callbacks for enabling Host Initiated Power management.
*
* LOCKING:
* EH context.
@ -3510,7 +3510,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
hints &= ~ATA_LPM_HIPM;
/* disable DIPM before changing link config */
if (policy != ATA_LPM_MIN_POWER && dipm) {
if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
@ -3553,7 +3553,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
/* host config updated, enable DIPM if transitioning to MIN_POWER */
ata_for_each_dev(dev, link, ENABLED) {
if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
ata_id_has_dipm(dev->id)) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);

View File

@ -106,10 +106,11 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
};
static const char *ata_lpm_policy_names[] = {
[ATA_LPM_UNKNOWN] = "max_performance",
[ATA_LPM_MAX_POWER] = "max_performance",
[ATA_LPM_MED_POWER] = "medium_power",
[ATA_LPM_MIN_POWER] = "min_power",
[ATA_LPM_UNKNOWN] = "max_performance",
[ATA_LPM_MAX_POWER] = "max_performance",
[ATA_LPM_MED_POWER] = "medium_power",
[ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
[ATA_LPM_MIN_POWER] = "min_power",
};
static ssize_t ata_scsi_lpm_store(struct device *device,

View File

@ -264,6 +264,10 @@ void ata_tport_delete(struct ata_port *ap)
put_device(dev);
}
static const struct device_type ata_port_sas_type = {
.name = ATA_PORT_TYPE_NAME,
};
/** ata_tport_add - initialize a transport ATA port structure
*
* @parent: parent device
@ -281,7 +285,10 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
dev->type = &ata_port_type;
if (ap->flags & ATA_FLAG_SAS_HOST)
dev->type = &ata_port_sas_type;
else
dev->type = &ata_port_type;
dev->parent = parent;
dev->release = ata_tport_release;

View File

@ -46,12 +46,14 @@ enum {
ATA_DNXFER_QUIET = (1 << 31),
};
#define ATA_PORT_TYPE_NAME "ata_port"
extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
extern struct device_type ata_port_type;
extern const struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
extern void ata_force_cbl(struct ata_port *ap);
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);

View File

@ -44,6 +44,47 @@ struct devcd_entry {
struct device devcd_dev;
void *data;
size_t datalen;
/*
* Here, mutex is required to serialize the calls to del_wk work between
* user/kernel space which happens when devcd is added with device_add()
* and that sends uevent to user space. User space reads the uevents,
* and calls to devcd_data_write() which try to modify the work which is
* not even initialized/queued from devcoredump.
*
*
*
* cpu0(X) cpu1(Y)
*
* dev_coredump() uevent sent to user space
* device_add() ======================> user space process Y reads the
* uevents writes to devcd fd
* which results into writes to
*
* devcd_data_write()
* mod_delayed_work()
* try_to_grab_pending()
* del_timer()
* debug_assert_init()
* INIT_DELAYED_WORK()
* schedule_delayed_work()
*
*
* Also, mutex alone would not be enough to avoid scheduling of
* del_wk work after it get flush from a call to devcd_free()
* mentioned as below.
*
* disabled_store()
* devcd_free()
* mutex_lock() devcd_data_write()
* flush_delayed_work()
* mutex_unlock()
* mutex_lock()
* mod_delayed_work()
* mutex_unlock()
* So, delete_work flag is required.
*/
struct mutex mutex;
bool delete_work;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
@ -103,7 +144,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
mod_delayed_work(system_wq, &devcd->del_wk, 0);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work) {
devcd->delete_work = true;
mod_delayed_work(system_wq, &devcd->del_wk, 0);
}
mutex_unlock(&devcd->mutex);
return count;
}
@ -131,7 +177,12 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
mutex_lock(&devcd->mutex);
if (!devcd->delete_work)
devcd->delete_work = true;
flush_delayed_work(&devcd->del_wk);
mutex_unlock(&devcd->mutex);
return 0;
}
@ -141,6 +192,30 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
return sprintf(buf, "%d\n", devcd_disabled);
}
/*
*
* disabled_store() worker()
* class_for_each_device(&devcd_class,
* NULL, NULL, devcd_free)
* ...
* ...
* while ((dev = class_dev_iter_next(&iter))
* devcd_del()
* device_del()
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
* mutex_lock(&devcd->mutex);
*
*
* In the above diagram, It looks like disabled_store() would be racing with parallely
* running devcd_del() and result in memory abort while acquiring devcd->mutex which
* is called after kfree of devcd memory after dropping its last reference with
* put_device(). However, this will not happens as fn(dev, data) runs
* with its own reference to device via klist_node so it is not its last reference.
* so, above situation would not occur.
*/
static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
@ -306,13 +381,17 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
devcd->delete_work = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
dev_set_name(&devcd->devcd_dev, "devcd%d",
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
if (device_add(&devcd->devcd_dev))
goto put_device;
@ -324,12 +403,15 @@ void dev_coredumpm(struct device *dev, struct module *owner,
"devcoredump"))
/* nothing - symlink will be missing */;
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
put_module:
module_put(owner);
free:

View File

@ -24,8 +24,11 @@ extern void pm_runtime_remove(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
WAKE_IRQ_DEDICATED_MANAGED)
WAKE_IRQ_DEDICATED_MANAGED | \
WAKE_IRQ_DEDICATED_REVERSE)
#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
struct wake_irq {
struct device *dev;
@ -37,7 +40,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
extern void dev_pm_disable_wake_irq_check(struct device *dev);
extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
extern void dev_pm_enable_wake_irq_complete(struct device *dev);
#ifdef CONFIG_PM_SLEEP

View File

@ -598,6 +598,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
if (retval)
goto fail;
dev_pm_enable_wake_irq_complete(dev);
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@ -632,7 +634,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
dev_pm_disable_wake_irq_check(dev);
dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@ -815,7 +817,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev);
dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);

View File

@ -157,24 +157,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
return IRQ_HANDLED;
}
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
* functions.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
struct wake_irq *wirq;
int err;
@ -206,7 +189,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
return err;
@ -217,8 +200,57 @@ err_free:
return err;
}
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
* functions.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
* dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
* with reverse enable ordering
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has a dedicated
* wake-up interrupt in addition to the device IO interrupt. It sets
* the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
* to enable dedicated wake-up interrupt after running the runtime suspend
* callback for @dev.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
* functions.
*/
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
/**
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
@ -289,25 +321,56 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
return;
enable:
enable_irq(wirq->irq);
if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
enable_irq(wirq->irq);
wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
}
}
/**
* dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
* @dev: Device
* @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
*
* Disables wake-up interrupt conditionally based on status.
* Should be only called from rpm_suspend() and rpm_resume() path.
*/
void dev_pm_disable_wake_irq_check(struct device *dev)
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
disable_irq_nosync(wirq->irq);
}
}
/**
* dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
* @dev: Device using the wake IRQ
*
* Enable wake IRQ conditionally based on status, mainly used if want to
* enable wake IRQ after running ->runtime_suspend() which depends on
* WAKE_IRQ_DEDICATED_REVERSE.
*
* Should be only called from rpm_suspend() path.
*/
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
enable_irq(wirq->irq);
}
/**
@ -324,7 +387,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
if (device_may_wakeup(wirq->dev)) {
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
!pm_runtime_status_suspended(wirq->dev))
!(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
@ -347,7 +410,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
disable_irq_wake(wirq->irq);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
!pm_runtime_status_suspended(wirq->dev))
!(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
disable_irq_nosync(wirq->irq);
}
}

View File

@ -40,6 +40,7 @@ static ssize_t regmap_name_read_file(struct file *file,
loff_t *ppos)
{
struct regmap *map = file->private_data;
const char *name = "nodev";
int ret;
char *buf;
@ -47,8 +48,11 @@ static ssize_t regmap_name_read_file(struct file *file,
if (!buf)
return -ENOMEM;
ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
if (ret < 0) {
if (map->dev && map->dev->driver)
name = map->dev->driver->name;
ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
if (ret >= PAGE_SIZE) {
kfree(buf);
return ret;
}

View File

@ -250,8 +250,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
static struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
.max_raw_read = I2C_SMBUS_BLOCK_MAX,
.max_raw_write = I2C_SMBUS_BLOCK_MAX,
.max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
.max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
};
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,

View File

@ -17,6 +17,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <asm/barrier.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/scatterlist.h>
@ -30,71 +31,111 @@ static DEFINE_IDA(rng_index_ida);
struct virtrng_info {
struct hwrng hwrng;
struct virtqueue *vq;
struct completion have_data;
char name[25];
unsigned int data_avail;
int index;
bool busy;
bool hwrng_register_done;
bool hwrng_removed;
/* data transfer */
struct completion have_data;
unsigned int data_avail;
unsigned int data_idx;
/* minimal size returned by rng_buffer_size() */
#if SMP_CACHE_BYTES < 32
u8 data[32];
#else
u8 data[SMP_CACHE_BYTES];
#endif
};
static void random_recv_done(struct virtqueue *vq)
{
struct virtrng_info *vi = vq->vdev->priv;
unsigned int len;
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
if (!virtqueue_get_buf(vi->vq, &len))
return;
smp_store_release(&vi->data_avail, len);
complete(&vi->have_data);
}
/* The host will fill any buffer we give it with sweet, sweet randomness. */
static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
static void request_entropy(struct virtrng_info *vi)
{
struct scatterlist sg;
sg_init_one(&sg, buf, size);
reinit_completion(&vi->have_data);
vi->data_idx = 0;
sg_init_one(&sg, vi->data, sizeof(vi->data));
/* There should always be room for one buffer. */
virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);
virtqueue_kick(vi->vq);
}
static unsigned int copy_data(struct virtrng_info *vi, void *buf,
unsigned int size)
{
size = min_t(unsigned int, size, vi->data_avail);
memcpy(buf, vi->data + vi->data_idx, size);
vi->data_idx += size;
vi->data_avail -= size;
if (vi->data_avail == 0)
request_entropy(vi);
return size;
}
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
int ret;
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
unsigned int chunk;
size_t read;
if (vi->hwrng_removed)
return -ENODEV;
if (!vi->busy) {
vi->busy = true;
reinit_completion(&vi->have_data);
register_buffer(vi, buf, size);
read = 0;
/* copy available data */
if (smp_load_acquire(&vi->data_avail)) {
chunk = copy_data(vi, buf, size);
size -= chunk;
read += chunk;
}
if (!wait)
return 0;
return read;
ret = wait_for_completion_killable(&vi->have_data);
if (ret < 0)
return ret;
/* We have already copied available entropy,
* so either size is 0 or data_avail is 0
*/
while (size != 0) {
/* data_avail is 0 but a request is pending */
ret = wait_for_completion_killable(&vi->have_data);
if (ret < 0)
return ret;
/* if vi->data_avail is 0, we have been interrupted
* by a cleanup, but buffer stays in the queue
*/
if (vi->data_avail == 0)
return read;
vi->busy = false;
chunk = copy_data(vi, buf + read, size);
size -= chunk;
read += chunk;
}
return vi->data_avail;
return read;
}
static void virtio_cleanup(struct hwrng *rng)
{
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
if (vi->busy)
wait_for_completion(&vi->have_data);
complete(&vi->have_data);
}
static int probe_common(struct virtio_device *vdev)
@ -130,6 +171,9 @@ static int probe_common(struct virtio_device *vdev)
goto err_find;
}
/* we always have a pending entropy request */
request_entropy(vi);
return 0;
err_find:
@ -145,9 +189,9 @@ static void remove_common(struct virtio_device *vdev)
vi->hwrng_removed = true;
vi->data_avail = 0;
vi->data_idx = 0;
complete(&vi->have_data);
vdev->config->reset(vdev);
vi->busy = false;
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
vdev->config->del_vqs(vdev);

View File

@ -445,10 +445,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->regs);
dpaux->irq = platform_get_irq(pdev, 0);
if (dpaux->irq < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
return -ENXIO;
}
if (dpaux->irq < 0)
return dpaux->irq;
if (!pdev->dev.pm_domain) {
dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");

View File

@ -55,31 +55,33 @@ enum xiic_endian {
/**
* struct xiic_i2c - Internal representation of the XIIC I2C bus
* @base: Memory base of the HW registers
* @wait: Wait queue for callers
* @adap: Kernel adapter representation
* @tx_msg: Messages from above to be sent
* @lock: Mutual exclusion
* @tx_pos: Current pos in TX message
* @nmsgs: Number of messages in tx_msg
* @state: See STATE_
* @rx_msg: Current RX message
* @rx_pos: Position within current RX message
* @dev: Pointer to device structure
* @base: Memory base of the HW registers
* @wait: Wait queue for callers
* @adap: Kernel adapter representation
* @tx_msg: Messages from above to be sent
* @lock: Mutual exclusion
* @tx_pos: Current pos in TX message
* @nmsgs: Number of messages in tx_msg
* @state: See STATE_
* @rx_msg: Current RX message
* @rx_pos: Position within current RX message
* @endianness: big/little-endian byte order
* @clk: Pointer to AXI4-lite input clock
*/
struct xiic_i2c {
struct device *dev;
void __iomem *base;
wait_queue_head_t wait;
struct i2c_adapter adap;
struct i2c_msg *tx_msg;
struct mutex lock;
unsigned int tx_pos;
unsigned int nmsgs;
enum xilinx_i2c_state state;
struct i2c_msg *rx_msg;
int rx_pos;
enum xiic_endian endianness;
struct device *dev;
void __iomem *base;
wait_queue_head_t wait;
struct i2c_adapter adap;
struct i2c_msg *tx_msg;
struct mutex lock;
unsigned int tx_pos;
unsigned int nmsgs;
enum xilinx_i2c_state state;
struct i2c_msg *rx_msg;
int rx_pos;
enum xiic_endian endianness;
struct clk *clk;
};
@ -169,6 +171,8 @@ struct xiic_i2c {
#define XIIC_RESET_MASK 0xAUL
#define XIIC_PM_TIMEOUT 1000 /* ms */
/* timeout waiting for the controller to respond */
#define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000))
/*
* The following constant is used for the device global interrupt enable
* register, to enable all interrupts for the device, this is the only bit
@ -179,7 +183,7 @@ struct xiic_i2c {
#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
static void xiic_start_xfer(struct xiic_i2c *i2c);
static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num);
static void __xiic_start_xfer(struct xiic_i2c *i2c);
/*
@ -260,17 +264,29 @@ static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
xiic_irq_en(i2c, mask);
}
static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
static int xiic_clear_rx_fifo(struct xiic_i2c *i2c)
{
u8 sr;
unsigned long timeout;
timeout = jiffies + XIIC_I2C_TIMEOUT;
for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
!(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) {
xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
if (time_after(jiffies, timeout)) {
dev_err(i2c->dev, "Failed to clear rx fifo\n");
return -ETIMEDOUT;
}
}
return 0;
}
static void xiic_reinit(struct xiic_i2c *i2c)
static int xiic_reinit(struct xiic_i2c *i2c)
{
int ret;
xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
/* Set receive Fifo depth to maximum (zero based). */
@ -283,12 +299,16 @@ static void xiic_reinit(struct xiic_i2c *i2c)
xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
/* make sure RX fifo is empty */
xiic_clear_rx_fifo(i2c);
ret = xiic_clear_rx_fifo(i2c);
if (ret)
return ret;
/* Enable interrupts */
xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
return 0;
}
static void xiic_deinit(struct xiic_i2c *i2c)
@ -368,6 +388,9 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
struct xiic_i2c *i2c = dev_id;
u32 pend, isr, ier;
u32 clr = 0;
int xfer_more = 0;
int wakeup_req = 0;
int wakeup_code = 0;
/* Get the interrupt Status from the IPIF. There is no clearing of
* interrupts in the IPIF. Interrupts must be cleared at the source.
@ -404,10 +427,16 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
*/
xiic_reinit(i2c);
if (i2c->rx_msg)
xiic_wakeup(i2c, STATE_ERROR);
if (i2c->tx_msg)
xiic_wakeup(i2c, STATE_ERROR);
if (i2c->rx_msg) {
wakeup_req = 1;
wakeup_code = STATE_ERROR;
}
if (i2c->tx_msg) {
wakeup_req = 1;
wakeup_code = STATE_ERROR;
}
/* don't try to handle other events */
goto out;
}
if (pend & XIIC_INTR_RX_FULL_MASK) {
/* Receive register/FIFO is full */
@ -441,8 +470,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
i2c->tx_msg++;
dev_dbg(i2c->adap.dev.parent,
"%s will start next...\n", __func__);
__xiic_start_xfer(i2c);
xfer_more = 1;
}
}
}
@ -456,11 +484,13 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
if (!i2c->tx_msg)
goto out;
if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
xiic_tx_space(i2c) == 0)
xiic_wakeup(i2c, STATE_DONE);
wakeup_req = 1;
if (i2c->nmsgs == 1 && !i2c->rx_msg &&
xiic_tx_space(i2c) == 0)
wakeup_code = STATE_DONE;
else
xiic_wakeup(i2c, STATE_ERROR);
wakeup_code = STATE_ERROR;
}
if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
/* Transmit register/FIFO is empty or ½ empty */
@ -484,7 +514,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
if (i2c->nmsgs > 1) {
i2c->nmsgs--;
i2c->tx_msg++;
__xiic_start_xfer(i2c);
xfer_more = 1;
} else {
xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
@ -502,6 +532,13 @@ out:
dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
if (xfer_more)
__xiic_start_xfer(i2c);
if (wakeup_req)
xiic_wakeup(i2c, wakeup_code);
WARN_ON(xfer_more && wakeup_req);
mutex_unlock(&i2c->lock);
return IRQ_HANDLED;
}
@ -669,12 +706,28 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
}
static void xiic_start_xfer(struct xiic_i2c *i2c)
static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
{
int ret;
mutex_lock(&i2c->lock);
xiic_reinit(i2c);
__xiic_start_xfer(i2c);
ret = xiic_busy(i2c);
if (ret)
goto out;
i2c->tx_msg = msgs;
i2c->rx_msg = NULL;
i2c->nmsgs = num;
ret = xiic_reinit(i2c);
if (!ret)
__xiic_start_xfer(i2c);
out:
mutex_unlock(&i2c->lock);
return ret;
}
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
@ -689,20 +742,19 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (err < 0)
return err;
err = xiic_busy(i2c);
if (err)
err = xiic_start_xfer(i2c, msgs, num);
if (err < 0) {
dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
goto out;
i2c->tx_msg = msgs;
i2c->nmsgs = num;
xiic_start_xfer(i2c);
}
if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
(i2c->state == STATE_DONE), HZ)) {
mutex_lock(&i2c->lock);
err = (i2c->state == STATE_DONE) ? num : -EIO;
goto out;
} else {
mutex_lock(&i2c->lock);
i2c->tx_msg = NULL;
i2c->rx_msg = NULL;
i2c->nmsgs = 0;
@ -710,6 +762,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
goto out;
}
out:
mutex_unlock(&i2c->lock);
pm_runtime_mark_last_busy(i2c->dev);
pm_runtime_put_autosuspend(i2c->dev);
return err;
@ -809,7 +862,11 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
i2c->endianness = BIG;
xiic_reinit(i2c);
ret = xiic_reinit(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot xiic_reinit\n");
goto err_clk_dis;
}
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);

View File

@ -787,6 +787,12 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
}
/* leave out any TS related code if unreachable */
if (IS_REACHABLE(CONFIG_INPUT)) {
has_ts = of_property_read_bool(pdev->dev.of_node,
"has-touchscreen") || pdata;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
@ -794,11 +800,15 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
info->irq = irq;
irq = platform_get_irq(pdev, 1);
if (irq == -EPROBE_DEFER)
return irq;
if (has_ts) {
irq = platform_get_irq(pdev, 1);
if (irq == -EPROBE_DEFER)
return irq;
info->tsirq = irq;
info->tsirq = irq;
} else {
info->tsirq = -1;
}
info->dev = &pdev->dev;
@ -865,12 +875,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->init_hw)
info->data->init_hw(info);
/* leave out any TS related code if unreachable */
if (IS_REACHABLE(CONFIG_INPUT)) {
has_ts = of_property_read_bool(pdev->dev.of_node,
"has-touchscreen") || pdata;
}
if (pdata)
info->delay = pdata->delay;
else

View File

@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#define STX104_OUT_CHAN(chan) { \
@ -54,10 +55,12 @@ MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
/**
* struct stx104_iio - IIO device private data structure
* @lock: synchronization lock to prevent I/O race conditions
* @chan_out_states: channels' output states
* @base: base port address of the IIO device
*/
struct stx104_iio {
struct mutex lock;
unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
unsigned int base;
};
@ -160,9 +163,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
if ((unsigned int)val > 65535)
return -EINVAL;
mutex_lock(&priv->lock);
priv->chan_out_states[chan->channel] = val;
outw(val, priv->base + 4 + 2 * chan->channel);
mutex_unlock(&priv->lock);
return 0;
}
return -EINVAL;
@ -323,6 +329,8 @@ static int stx104_probe(struct device *dev, unsigned int id)
priv = iio_priv(indio_dev);
priv->base = base[id];
mutex_init(&priv->lock);
/* configure device for software trigger operation */
outb(0, base[id] + 9);

View File

@ -3211,7 +3211,6 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
tx->num_desc++;
if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
@ -3225,6 +3224,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
SDMA_MAP_NONE,
dd->sdma_pad_phys,
sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
tx->num_desc++;
_sdma_close_tx(dd, tx);
return rval;
}

View File

@ -679,14 +679,13 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
struct sdma_txreq *tx)
{
tx->descp[tx->num_desc].qw[0] |=
SDMA_DESC0_LAST_DESC_FLAG;
tx->descp[tx->num_desc].qw[1] |=
dd->default_desc1;
u16 last_desc = tx->num_desc - 1;
tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
tx->descp[last_desc].qw[1] |= dd->default_desc1;
if (tx->flags & SDMA_TXREQ_F_URGENT)
tx->descp[tx->num_desc].qw[1] |=
(SDMA_DESC1_HEAD_TO_HOST_FLAG |
SDMA_DESC1_INT_REQ_FLAG);
tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
SDMA_DESC1_INT_REQ_FLAG);
}
static inline int _sdma_txadd_daddr(
@ -703,6 +702,7 @@ static inline int _sdma_txadd_daddr(
type,
addr, len);
WARN_ON(len > tx->tlen);
tx->num_desc++;
tx->tlen -= len;
/* special cases for last */
if (!tx->tlen) {
@ -714,7 +714,6 @@ static inline int _sdma_txadd_daddr(
_sdma_close_tx(dd, tx);
}
}
tx->num_desc++;
return rval;
}

View File

@ -189,15 +189,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
ISER_RX_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr))
goto dma_map_fail;
rx_desc->dma_addr = dma_addr;
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->pd->local_dma_lkey;
rx_desc->rx_cqe.done = isert_recv_done;
@ -209,7 +209,7 @@ dma_map_fail:
rx_desc = isert_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
@ -230,7 +230,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
@ -414,10 +414,9 @@ isert_free_login_buf(struct isert_conn *isert_conn)
ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
kfree(isert_conn->login_rsp_buf);
ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
ISER_RX_PAYLOAD_SIZE,
DMA_FROM_DEVICE);
kfree(isert_conn->login_req_buf);
ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
kfree(isert_conn->login_desc);
}
static int
@ -426,25 +425,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
{
int ret;
isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
GFP_KERNEL);
if (!isert_conn->login_req_buf)
if (!isert_conn->login_desc)
return -ENOMEM;
isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
isert_conn->login_req_buf,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
isert_conn->login_desc->buf,
ISER_RX_SIZE, DMA_FROM_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
if (ret) {
isert_err("login_req_dma mapping error: %d\n", ret);
isert_conn->login_req_dma = 0;
goto out_free_login_req_buf;
isert_err("login_desc dma mapping error: %d\n", ret);
isert_conn->login_desc->dma_addr = 0;
goto out_free_login_desc;
}
isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!isert_conn->login_rsp_buf) {
ret = -ENOMEM;
goto out_unmap_login_req_buf;
goto out_unmap_login_desc;
}
isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
@ -461,11 +460,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
out_free_login_rsp_buf:
kfree(isert_conn->login_rsp_buf);
out_unmap_login_req_buf:
ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
out_free_login_req_buf:
kfree(isert_conn->login_req_buf);
out_unmap_login_desc:
ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
out_free_login_desc:
kfree(isert_conn->login_desc);
return ret;
}
@ -584,7 +583,7 @@ isert_connect_release(struct isert_conn *isert_conn)
ib_destroy_qp(isert_conn->qp);
}
if (isert_conn->login_req_buf)
if (isert_conn->login_desc)
isert_free_login_buf(isert_conn);
isert_device_put(device);
@ -974,17 +973,18 @@ isert_login_post_recv(struct isert_conn *isert_conn)
int ret;
memset(&sge, 0, sizeof(struct ib_sge));
sge.addr = isert_conn->login_req_dma;
sge.addr = isert_conn->login_desc->dma_addr +
isert_get_hdr_offset(isert_conn->login_desc);
sge.length = ISER_RX_PAYLOAD_SIZE;
sge.lkey = isert_conn->device->pd->local_dma_lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
@ -1061,7 +1061,7 @@ post_send:
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
struct iser_rx_desc *rx_desc = isert_conn->login_desc;
int rx_buflen = isert_conn->login_req_len;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
@ -1073,7 +1073,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
if (login->first_request) {
struct iscsi_login_req *login_req =
(struct iscsi_login_req *)&rx_desc->iscsi_header;
(struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
/*
* Setup the initial iscsi_login values from the leading
* login request PDU.
@ -1092,13 +1092,13 @@ isert_rx_login_req(struct isert_conn *isert_conn)
login->tsih = be16_to_cpu(login_req->tsih);
}
memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
isert_dbg("Using login payload size: %d, rx_buflen: %d "
"MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
MAX_KEY_VALUE_PAIRS);
memcpy(login->req_buf, &rx_desc->data[0], size);
memcpy(login->req_buf, isert_get_data(rx_desc), size);
if (login->first_request) {
complete(&isert_conn->login_comp);
@ -1163,14 +1163,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
if (imm_data_len != data_len) {
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
&rx_desc->data[0], imm_data_len);
isert_get_data(rx_desc), imm_data_len);
isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
sg_nents, imm_data_len);
} else {
sg_init_table(&isert_cmd->sg, 1);
cmd->se_cmd.t_data_sg = &isert_cmd->sg;
cmd->se_cmd.t_data_nents = 1;
sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
imm_data_len);
isert_dbg("Transfer Immediate imm_data_len: %d\n",
imm_data_len);
}
@ -1239,9 +1240,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
}
isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
"sg_nents: %u from %p %u\n", sg_start, sg_off,
sg_nents, &rx_desc->data[0], unsol_data_len);
sg_nents, isert_get_data(rx_desc), unsol_data_len);
sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
unsol_data_len);
rc = iscsit_check_dataout_payload(cmd, hdr, false);
@ -1300,7 +1301,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
}
cmd->text_in_ptr = text_in;
memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
return iscsit_process_text_cmd(conn, cmd, hdr);
}
@ -1310,7 +1311,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
uint32_t read_stag, uint64_t read_va,
uint32_t write_stag, uint64_t write_va)
{
struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_cmd *cmd;
struct isert_cmd *isert_cmd;
@ -1408,8 +1409,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
uint64_t read_va = 0, write_va = 0;
uint32_t read_stag = 0, write_stag = 0;
@ -1423,7 +1424,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rx_desc->in_use = true;
ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ISER_RX_SIZE, DMA_FROM_DEVICE);
isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
@ -1458,7 +1459,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
read_stag, read_va, write_stag, write_va);
ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
@ -1472,8 +1473,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
@ -1488,8 +1489,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
complete(&isert_conn->login_req_comp);
mutex_unlock(&isert_conn->mutex);
ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void

View File

@ -59,9 +59,11 @@
ISERT_MAX_TX_MISC_PDUS + \
ISERT_MAX_RX_MISC_PDUS)
#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
sizeof(struct ib_cqe) + sizeof(bool)))
/*
* RX size is default of 8k plus headers, but data needs to align to
* 512 boundary, so use 1024 to have the extra space for alignment.
*/
#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
#define ISCSI_ISER_SG_TABLESIZE 256
@ -80,21 +82,41 @@ enum iser_conn_state {
};
struct iser_rx_desc {
struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
char data[ISCSI_DEF_MAX_RECV_SEG_LEN];
char buf[ISER_RX_SIZE];
u64 dma_addr;
struct ib_sge rx_sg;
struct ib_cqe rx_cqe;
bool in_use;
char pad[ISER_RX_PAD_SIZE];
} __packed;
};
static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
{
return container_of(cqe, struct iser_rx_desc, rx_cqe);
}
static void *isert_get_iser_hdr(struct iser_rx_desc *desc)
{
return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN;
}
static size_t isert_get_hdr_offset(struct iser_rx_desc *desc)
{
return isert_get_iser_hdr(desc) - (void *)desc->buf;
}
static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc)
{
return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl);
}
static void *isert_get_data(struct iser_rx_desc *desc)
{
void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN;
WARN_ON((uintptr_t)data & 511);
return data;
}
struct iser_tx_desc {
struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
@ -141,9 +163,8 @@ struct isert_conn {
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
struct iser_rx_desc *login_req_buf;
struct iser_rx_desc *login_desc;
char *login_rsp_buf;
u64 login_req_dma;
int login_req_len;
u64 login_rsp_dma;
struct iser_rx_desc *rx_descs;

View File

@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node,
unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
struct irq_domain *domain;
int ret;
pr_info("Initializing J-Core AIC\n");
@ -100,11 +101,17 @@ static int __init aic_irq_of_init(struct device_node *node,
jcore_aic.irq_unmask = noop;
jcore_aic.name = "AIC";
domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops,
ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq,
of_node_to_nid(node));
if (ret < 0)
return ret;
domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
&jcore_aic_irqdomain_ops,
&jcore_aic);
if (!domain)
return -ENOMEM;
irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq);
return 0;
}

View File

@ -26,9 +26,8 @@
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
struct pwm_state pwmstate;
unsigned int active_low;
unsigned int period;
int duty;
};
struct led_pwm_priv {
@ -36,37 +35,23 @@ struct led_pwm_priv {
struct led_pwm_data leds[0];
};
static void __led_pwm_set(struct led_pwm_data *led_dat)
{
int new_duty = led_dat->duty;
pwm_config(led_dat->pwm, new_duty, led_dat->period);
if (new_duty == 0)
pwm_disable(led_dat->pwm);
else
pwm_enable(led_dat->pwm);
}
static int led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
unsigned int max = led_dat->cdev.max_brightness;
unsigned long long duty = led_dat->period;
unsigned long long duty = led_dat->pwmstate.period;
duty *= brightness;
do_div(duty, max);
if (led_dat->active_low)
duty = led_dat->period - duty;
duty = led_dat->pwmstate.period - duty;
led_dat->duty = duty;
__led_pwm_set(led_dat);
return 0;
led_dat->pwmstate.duty_cycle = duty;
led_dat->pwmstate.enabled = true;
return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
}
static inline size_t sizeof_pwm_leds_priv(int num_leds)
@ -85,7 +70,6 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct device_node *child)
{
struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
struct pwm_args pargs;
int ret;
led_data->active_low = led->active_low;
@ -109,17 +93,10 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
led_data->cdev.brightness_set_blocking = led_pwm_set;
/*
* FIXME: pwm_apply_args() should be removed when switching to the
* atomic PWM API.
*/
pwm_apply_args(led_data->pwm);
pwm_init_state(led_data->pwm, &led_data->pwmstate);
pwm_get_args(led_data->pwm, &pargs);
led_data->period = pargs.period;
if (!led_data->period && (led->pwm_period_ns > 0))
led_data->period = led->pwm_period_ns;
if (!led_data->pwmstate.period)
led_data->pwmstate.period = led->pwm_period_ns;
ret = led_classdev_register(dev, &led_data->cdev);
if (ret == 0) {

View File

@ -1,14 +1,18 @@
/*
* ledtrig-cpu.c - LED trigger based on CPU activity
*
* This LED trigger will be registered for each possible CPU and named as
* cpu0, cpu1, cpu2, cpu3, etc.
* This LED trigger will be registered for first 8 CPUs and named
* as cpu0..cpu7. There's additional trigger called cpu that
* is on when any CPU is active.
*
* If you want support for arbitrary number of CPUs, make it one trigger,
* with additional sysfs file selecting which CPU to watch.
*
* It can be bound to any LED just like other triggers using either a
* board file or via sysfs interface.
*
* An API named ledtrig_cpu is exported for any user, who want to add CPU
* activity indication in their code
* activity indication in their code.
*
* Copyright 2011 Linus Walleij <linus.walleij@linaro.org>
* Copyright 2011 - 2012 Bryan Wu <bryan.wu@canonical.com>
@ -130,7 +134,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu)
static int __init ledtrig_cpu_init(void)
{
int cpu;
unsigned int cpu;
int ret;
/* Supports up to 9999 cpu cores */
@ -149,7 +153,10 @@ static int __init ledtrig_cpu_init(void)
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
if (cpu >= 8)
continue;
snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
led_trigger_register_simple(trig->name, &trig->_trig);
}

View File

@ -343,7 +343,7 @@ EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
struct v4l2_fwnode_link *link)
{
const char *port_prop = is_of_node(__fwnode) ? "reg" : "port";
const char *port_prop = "reg";
struct fwnode_handle *fwnode;
memset(link, 0, sizeof(*link));

View File

@ -26,7 +26,6 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@ -136,7 +135,6 @@ struct meson_host {
struct mmc_host *mmc;
struct mmc_command *cmd;
spinlock_t lock;
void __iomem *regs;
struct clk *core_clk;
struct clk *mmc_clk;
@ -1016,8 +1014,6 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
if (WARN_ON(!host) || WARN_ON(!host->cmd))
return IRQ_NONE;
spin_lock(&host->lock);
cmd = host->cmd;
data = cmd->data;
cmd->error = 0;
@ -1045,11 +1041,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
if (data && !cmd->error)
data->bytes_xfered = data->blksz * data->blocks;
if (meson_mmc_bounce_buf_read(data) ||
meson_mmc_get_next_command(cmd))
ret = IRQ_WAKE_THREAD;
else
ret = IRQ_HANDLED;
return IRQ_WAKE_THREAD;
}
out:
@ -1064,10 +1057,6 @@ out:
writel(start, host->regs + SD_EMMC_START);
}
if (ret == IRQ_HANDLED)
meson_mmc_request_done(host->mmc, cmd->mrq);
spin_unlock(&host->lock);
return ret;
}
@ -1220,8 +1209,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, host);
spin_lock_init(&host->lock);
/* Get regulators and the supported OCR mask */
host->vqmmc_enabled = false;
ret = mmc_regulator_get_supply(mmc);

View File

@ -420,8 +420,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extra_size = 0;
/* Protection Register info */
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
if (extp->NumProtectionFields) {
struct cfi_intelext_otpinfo *otp =
(struct cfi_intelext_otpinfo *)&extp->extra[0];
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
if (extp_size >= sizeof(*extp) + extra_size) {
int i;
/* Do some byteswapping if necessary */
for (i = 0; i < extp->NumProtectionFields - 1; i++) {
otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
otp->FactGroups = le16_to_cpu(otp->FactGroups);
otp->UserGroups = le16_to_cpu(otp->UserGroups);
otp++;
}
}
}
}
if (extp->MinorVersion >= '1') {
@ -694,14 +711,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
*/
if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
&& extp->FeatureSupport & (1 << 9)) {
int offs = 0;
struct cfi_private *newcfi;
struct flchip *chip;
struct flchip_shared *shared;
int offs, numregions, numparts, partshift, numvirtchips, i, j;
int numregions, numparts, partshift, numvirtchips, i, j;
/* Protection Register info */
offs = (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
if (extp->NumProtectionFields)
offs = (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
/* Burst Read info */
offs += extp->extra[offs+1]+2;

View File

@ -677,10 +677,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
return NULL;
arp = (struct arp_pkt *)skb_network_header(skb);
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
/* Don't modify or load balance ARPs that do not originate
* from the bond itself or a VLAN directly above the bond.
*/
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {

View File

@ -562,7 +562,8 @@ static void can_restart(struct net_device *dev)
struct can_frame *cf;
int err;
BUG_ON(netif_carrier_ok(dev));
if (netif_carrier_ok(dev))
netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
/*
* No synchronization needed because the device is bus-off and
@ -588,11 +589,12 @@ restart:
priv->can_stats.restarts++;
/* Now restart the device */
err = priv->do_set_mode(dev, CAN_MODE_START);
netif_carrier_on(dev);
if (err)
err = priv->do_set_mode(dev, CAN_MODE_START);
if (err) {
netdev_err(dev, "Error %d during restart", err);
netif_carrier_off(dev);
}
}
static void can_restart_work(struct work_struct *work)

View File

@ -607,6 +607,7 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
clk_prepare_enable(priv->clk);
platform_device_unregister(priv->mii_pdev);
platform_device_put(priv->mii_pdev);
clk_disable_unprepare(priv->clk);
}

View File

@ -7313,7 +7313,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
struct ring_info *tx_skb = tp->tx_skb + entry;
u32 status;
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
if (status & DescOwn)
break;

View File

@ -146,7 +146,6 @@ struct hv_netvsc_packet {
struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
int ring_size;
u32 num_chn;
u32 send_sections;
u32 recv_sections;
@ -187,6 +186,9 @@ struct rndis_message;
struct netvsc_device;
struct net_device_context;
extern u32 netvsc_ring_bytes;
extern struct reciprocal_value netvsc_ring_reciprocal;
struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *info);
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
@ -811,8 +813,6 @@ struct netvsc_device {
struct rndis_device *extension;
int ring_size;
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */

View File

@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <linux/reciprocal_div.h>
#include <asm/sync_bitops.h>
@ -654,14 +655,11 @@ void netvsc_device_remove(struct hv_device *device)
* Get the percentage of available bytes to write in the ring.
* The return value is in range from 0 to 100.
*/
static inline u32 hv_ringbuf_avail_percent(
struct hv_ring_buffer_info *ring_info)
static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
{
u32 avail_read, avail_write;
u32 avail_write = hv_get_bytes_to_write(ring_info);
hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
return avail_write * 100 / ring_info->ring_datasize;
return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
}
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
@ -1313,7 +1311,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *device_info)
{
int i, ret = 0;
int ring_size = device_info->ring_size;
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@ -1325,8 +1322,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
net_device_ctx->tx_table[i] = 0;
net_device->ring_size = ring_size;
/* Because the device uses NAPI, all the interrupt batching and
* control is done via Net softirq, not the channel handling
*/
@ -1353,10 +1348,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_poll, NAPI_POLL_WEIGHT);
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
netvsc_channel_cb,
net_device->chan_table);
ret = vmbus_open(device->channel, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, net_device->chan_table);
if (ret != 0) {
netdev_err(ndev, "unable to open channel: %d\n", ret);

View File

@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/reciprocal_div.h>
#include <net/arp.h>
#include <net/route.h>
@ -54,9 +55,11 @@
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
@ -1013,7 +1016,6 @@ static int netvsc_set_channels(struct net_device *net,
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = count;
device_info.ring_size = ring_size;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
@ -1111,7 +1113,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
}
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
@ -1631,7 +1632,6 @@ static int netvsc_set_ringparam(struct net_device *ndev,
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn;
device_info.ring_size = ring_size;
device_info.send_sections = new_tx;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = new_rx;
@ -2082,7 +2082,6 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = VRSS_CHANNEL_DEFAULT;
device_info.send_sections = NETVSC_DEFAULT_TX;
device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
@ -2264,16 +2263,23 @@ static int __init netvsc_drv_init(void)
if (ring_size < RING_SIZE_MIN) {
ring_size = RING_SIZE_MIN;
pr_info("Increased ring_size to %d (min allowed)\n",
pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
return ret;
netvsc_ring_bytes = ring_size * PAGE_SIZE;
netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
register_netdevice_notifier(&netvsc_netdev_notifier);
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
goto err_vmbus_reg;
return 0;
err_vmbus_reg:
unregister_netdevice_notifier(&netvsc_netdev_notifier);
return ret;
}
MODULE_LICENSE("GPL");

View File

@ -1053,8 +1053,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
/* Set the channel before opening.*/
nvchan->channel = new_sc;
ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
nvscdev->ring_size * PAGE_SIZE, NULL, 0,
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
if (ret == 0)
napi_enable(&nvchan->napi);

View File

@ -771,7 +771,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
dev_set_promiscuity(lowerdev,
dev->flags & IFF_PROMISC ? 1 : -1);

View File

@ -466,6 +466,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
case PPPIOCSMRU:
if (get_user(val, (int __user *) argp))
break;
if (val > U16_MAX) {
err = -EINVAL;
break;
}
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
@ -701,7 +705,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
/* strip address/control field if present */
p = skb->data;
if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
/* chop off address/control */
if (skb->len < 3)
goto err;

View File

@ -137,6 +137,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
struct hif_device_usb *hif_dev;
unsigned long flags;
bool txok = true;
if (!cmd || !cmd->skb || !cmd->hif_dev)
@ -157,14 +158,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
* If the URBs are being flushed, no need to complete
* this packet.
*/
spin_lock(&hif_dev->tx.tx_lock);
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
spin_unlock(&hif_dev->tx.tx_lock);
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
dev_kfree_skb_any(cmd->skb);
kfree(cmd);
return;
}
spin_unlock(&hif_dev->tx.tx_lock);
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
break;
default:

View File

@ -1131,25 +1131,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
unsigned long flags;
spin_lock(&priv->rx.rxbuflock);
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
if (!tmp_buf->in_process) {
rxbuf = tmp_buf;
break;
}
}
spin_unlock(&priv->rx.rxbuflock);
spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
if (rxbuf == NULL) {
ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
spin_lock(&priv->rx.rxbuflock);
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
rxbuf->skb = skb;
rxbuf->in_process = true;
spin_unlock(&priv->rx.rxbuflock);
spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
tasklet_schedule(&priv->rx_tasklet);
return;

View File

@ -212,6 +212,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
{
struct wmi *wmi = (struct wmi *) priv;
struct wmi_cmd_hdr *hdr;
unsigned long flags;
u16 cmd_id;
if (unlikely(wmi->stopped))
@ -225,23 +226,23 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
cmd_id = be16_to_cpu(hdr->command_id);
if (cmd_id & 0x1000) {
spin_lock(&wmi->wmi_lock);
spin_lock_irqsave(&wmi->wmi_lock, flags);
__skb_queue_tail(&wmi->wmi_event_queue, skb);
spin_unlock(&wmi->wmi_lock);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
tasklet_schedule(&wmi->wmi_event_tasklet);
return;
}
/* Check if there has been a timeout. */
spin_lock(&wmi->wmi_lock);
spin_lock_irqsave(&wmi->wmi_lock, flags);
if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
spin_unlock(&wmi->wmi_lock);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
goto free_skb;
}
spin_unlock(&wmi->wmi_lock);
/* WMI command response */
ath9k_wmi_rsp_callback(wmi, skb);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
free_skb:
kfree_skb(skb);
@ -310,8 +311,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
struct ath_common *common = ath9k_hw_common(ah);
u16 headroom = sizeof(struct htc_frame_hdr) +
sizeof(struct wmi_cmd_hdr);
unsigned long time_left, flags;
struct sk_buff *skb;
unsigned long time_left;
int ret = 0;
if (ah->ah_flags & AH_UNPLUGGED)
@ -343,7 +344,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
if (!time_left) {
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
spin_lock_irqsave(&wmi->wmi_lock, flags);
wmi->last_seq_id = 0;
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
mutex_unlock(&wmi->op_mutex);
kfree_skb(skb);
return -ETIMEDOUT;

View File

@ -50,6 +50,8 @@ static int mwifiex_pcie_probe_of(struct device *dev)
}
static void mwifiex_pcie_work(struct work_struct *work);
static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter);
static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter);
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
@ -58,8 +60,8 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
struct pcie_service_card *card = adapter->card;
struct mwifiex_dma_mapping mapping;
mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
if (pci_dma_mapping_error(card->dev, mapping.addr)) {
mapping.addr = dma_map_single(&card->dev->dev, skb->data, size, flags);
if (dma_mapping_error(&card->dev->dev, mapping.addr)) {
mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
return -1;
}
@ -75,7 +77,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
struct mwifiex_dma_mapping mapping;
mwifiex_get_mapping(skb, &mapping);
pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
dma_unmap_single(&card->dev->dev, mapping.addr, mapping.len, flags);
}
/*
@ -455,10 +457,9 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
struct sk_buff *cmdrsp = card->cmdrsp_buf;
for (count = 0; count < max_delay_loop_cnt; count++) {
pci_dma_sync_single_for_cpu(card->dev,
MWIFIEX_SKB_DMA_ADDR(cmdrsp),
sizeof(sleep_cookie),
PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&card->dev->dev,
MWIFIEX_SKB_DMA_ADDR(cmdrsp),
sizeof(sleep_cookie), DMA_FROM_DEVICE);
buffer = cmdrsp->data;
sleep_cookie = get_unaligned_le32(buffer);
@ -467,10 +468,10 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
"sleep cookie found at count %d\n", count);
break;
}
pci_dma_sync_single_for_device(card->dev,
MWIFIEX_SKB_DMA_ADDR(cmdrsp),
sizeof(sleep_cookie),
PCI_DMA_FROMDEVICE);
dma_sync_single_for_device(&card->dev->dev,
MWIFIEX_SKB_DMA_ADDR(cmdrsp),
sizeof(sleep_cookie),
DMA_FROM_DEVICE);
usleep_range(20, 30);
}
@ -618,14 +619,15 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
if (!skb) {
mwifiex_dbg(adapter, ERROR,
"Unable to allocate skb for RX ring.\n");
kfree(card->rxbd_ring_vbase);
return -ENOMEM;
}
if (mwifiex_map_pci_memory(adapter, skb,
MWIFIEX_RX_DATA_BUF_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
DMA_FROM_DEVICE)) {
kfree_skb(skb);
return -ENOMEM;
}
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@ -675,16 +677,14 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
if (!skb) {
mwifiex_dbg(adapter, ERROR,
"Unable to allocate skb for EVENT buf.\n");
kfree(card->evtbd_ring_vbase);
return -ENOMEM;
}
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE)) {
DMA_FROM_DEVICE)) {
kfree_skb(skb);
kfree(card->evtbd_ring_vbase);
return -1;
return -ENOMEM;
}
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@ -724,7 +724,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@ -733,7 +733,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@ -763,7 +763,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@ -772,7 +772,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@ -798,7 +798,7 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
if (card->evt_buf_list[i]) {
skb = card->evt_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
card->evt_buf_list[i] = NULL;
@ -839,9 +839,10 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: txbd_ring: Allocating %d bytes\n",
card->txbd_ring_size);
card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
card->txbd_ring_size,
&card->txbd_ring_pbase);
card->txbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
card->txbd_ring_size,
&card->txbd_ring_pbase,
GFP_KERNEL);
if (!card->txbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
"allocate consistent memory (%d bytes) failed!\n",
@ -865,9 +866,9 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_txq_ring(adapter);
if (card->txbd_ring_vbase)
pci_free_consistent(card->dev, card->txbd_ring_size,
card->txbd_ring_vbase,
card->txbd_ring_pbase);
dma_free_coherent(&card->dev->dev, card->txbd_ring_size,
card->txbd_ring_vbase,
card->txbd_ring_pbase);
card->txbd_ring_size = 0;
card->txbd_wrptr = 0;
card->txbd_rdptr = 0 | reg->tx_rollover_ind;
@ -882,6 +883,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
{
int ret;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@ -903,9 +905,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: rxbd_ring: Allocating %d bytes\n",
card->rxbd_ring_size);
card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
card->rxbd_ring_size,
&card->rxbd_ring_pbase);
card->rxbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
card->rxbd_ring_size,
&card->rxbd_ring_pbase,
GFP_KERNEL);
if (!card->rxbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
"allocate consistent memory (%d bytes) failed!\n",
@ -919,7 +922,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
(u32)((u64)card->rxbd_ring_pbase >> 32),
card->rxbd_ring_size);
return mwifiex_init_rxq_ring(adapter);
ret = mwifiex_init_rxq_ring(adapter);
if (ret)
mwifiex_pcie_delete_rxbd_ring(adapter);
return ret;
}
/*
@ -933,9 +939,9 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_rxq_ring(adapter);
if (card->rxbd_ring_vbase)
pci_free_consistent(card->dev, card->rxbd_ring_size,
card->rxbd_ring_vbase,
card->rxbd_ring_pbase);
dma_free_coherent(&card->dev->dev, card->rxbd_ring_size,
card->rxbd_ring_vbase,
card->rxbd_ring_pbase);
card->rxbd_ring_size = 0;
card->rxbd_wrptr = 0;
card->rxbd_rdptr = 0 | reg->rx_rollover_ind;
@ -950,6 +956,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
{
int ret;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@ -967,9 +974,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: evtbd_ring: Allocating %d bytes\n",
card->evtbd_ring_size);
card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
card->evtbd_ring_size,
&card->evtbd_ring_pbase);
card->evtbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
card->evtbd_ring_size,
&card->evtbd_ring_pbase,
GFP_KERNEL);
if (!card->evtbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
"allocate consistent memory (%d bytes) failed!\n",
@ -983,7 +991,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
(u32)((u64)card->evtbd_ring_pbase >> 32),
card->evtbd_ring_size);
return mwifiex_pcie_init_evt_ring(adapter);
ret = mwifiex_pcie_init_evt_ring(adapter);
if (ret)
mwifiex_pcie_delete_evtbd_ring(adapter);
return ret;
}
/*
@ -997,9 +1008,9 @@ static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_evt_ring(adapter);
if (card->evtbd_ring_vbase)
pci_free_consistent(card->dev, card->evtbd_ring_size,
card->evtbd_ring_vbase,
card->evtbd_ring_pbase);
dma_free_coherent(&card->dev->dev, card->evtbd_ring_size,
card->evtbd_ring_vbase,
card->evtbd_ring_pbase);
card->evtbd_wrptr = 0;
card->evtbd_rdptr = 0 | reg->evt_rollover_ind;
card->evtbd_ring_size = 0;
@ -1026,7 +1037,7 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE)) {
DMA_FROM_DEVICE)) {
kfree_skb(skb);
return -1;
}
@ -1050,14 +1061,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
if (card && card->cmdrsp_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
card->cmdrsp_buf = NULL;
}
if (card && card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
@ -1072,8 +1083,10 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
u32 *cookie;
card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
&card->sleep_cookie_pbase);
card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
sizeof(u32),
&card->sleep_cookie_pbase,
GFP_KERNEL);
if (!card->sleep_cookie_vbase) {
mwifiex_dbg(adapter, ERROR,
"pci_alloc_consistent failed!\n");
@ -1101,9 +1114,9 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
if (card && card->sleep_cookie_vbase) {
pci_free_consistent(card->dev, sizeof(u32),
card->sleep_cookie_vbase,
card->sleep_cookie_pbase);
dma_free_coherent(&card->dev->dev, sizeof(u32),
card->sleep_cookie_vbase,
card->sleep_cookie_pbase);
card->sleep_cookie_vbase = NULL;
}
@ -1175,7 +1188,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
skb, wrdoneidx);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
unmap_count++;
@ -1268,7 +1281,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2);
if (mwifiex_map_pci_memory(adapter, skb, skb->len,
PCI_DMA_TODEVICE))
DMA_TO_DEVICE))
return -1;
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
@ -1358,7 +1371,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -EINPROGRESS;
done_unmap:
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
card->tx_buf_list[wrindx] = NULL;
atomic_dec(&adapter->tx_hw_pending);
if (reg->pfu_enabled)
@ -1412,7 +1425,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (!skb_data)
return -ENOMEM;
mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
mwifiex_unmap_pci_memory(adapter, skb_data, DMA_FROM_DEVICE);
card->rx_buf_list[rd_index] = NULL;
/* Get data length from interface header -
@ -1450,7 +1463,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb_tmp,
MWIFIEX_RX_DATA_BUF_SIZE,
PCI_DMA_FROMDEVICE))
DMA_FROM_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
@ -1527,7 +1540,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
}
if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@ -1539,7 +1552,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write download command to boot code.\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@ -1551,7 +1564,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write download command to boot code.\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@ -1560,7 +1573,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write command len to cmd_size scratch reg\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@ -1569,7 +1582,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
mwifiex_dbg(adapter, ERROR,
"%s: failed to assert door-bell intr\n", __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@ -1628,7 +1641,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
put_unaligned_le16((u16)skb->len, &payload[0]);
put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]);
if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
return -1;
card->cmd_buf = skb;
@ -1728,17 +1741,16 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
"info: Rx CMD Response\n");
if (adapter->curr_cmd)
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
mwifiex_unmap_pci_memory(adapter, skb, DMA_FROM_DEVICE);
else
pci_dma_sync_single_for_cpu(card->dev,
MWIFIEX_SKB_DMA_ADDR(skb),
MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&card->dev->dev,
MWIFIEX_SKB_DMA_ADDR(skb),
MWIFIEX_UPLD_SIZE, DMA_FROM_DEVICE);
/* Unmap the command as a response has been received. */
if (card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
@ -1749,10 +1761,10 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (!adapter->curr_cmd) {
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
pci_dma_sync_single_for_device(card->dev,
MWIFIEX_SKB_DMA_ADDR(skb),
MWIFIEX_SLEEP_COOKIE_SIZE,
PCI_DMA_FROMDEVICE);
dma_sync_single_for_device(&card->dev->dev,
MWIFIEX_SKB_DMA_ADDR(skb),
MWIFIEX_SLEEP_COOKIE_SIZE,
DMA_FROM_DEVICE);
if (mwifiex_write_reg(adapter,
PCIE_CPU_INT_EVENT,
CPU_INTR_SLEEP_CFM_DONE)) {
@ -1763,7 +1775,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
mwifiex_delay_for_sleep_cookie(adapter,
MWIFIEX_MAX_DELAY_COUNT);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
skb_pull(skb, adapter->intf_hdr_len);
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
@ -1779,7 +1791,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
skb_push(skb, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
DMA_FROM_DEVICE))
return -1;
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
skb_pull(skb, adapter->intf_hdr_len);
@ -1821,7 +1833,7 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
card->cmdrsp_buf = skb;
skb_push(card->cmdrsp_buf, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
DMA_FROM_DEVICE))
return -1;
}
@ -1876,7 +1888,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
mwifiex_unmap_pci_memory(adapter, skb_cmd, DMA_FROM_DEVICE);
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
@ -1955,7 +1967,7 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
skb_put(skb, MAX_EVENT_SIZE - skb->len);
if (mwifiex_map_pci_memory(adapter, skb,
MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE))
DMA_FROM_DEVICE))
return -1;
card->evt_buf_list[rdptr] = skb;
desc = card->evtbd_ring[rdptr];
@ -2237,7 +2249,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
"interrupt status during fw dnld.\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
ret = -1;
goto done;
}
@ -2249,12 +2261,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
ret = -1;
goto done;
}
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
offset += txlen;
} while (true);
@ -2919,14 +2931,13 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
pci_set_master(pdev);
pr_notice("try set_consistent_dma_mask(32)\n");
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
pr_err("set_dma_mask(32) failed\n");
pr_err("set_dma_mask(32) failed: %d\n", ret);
goto err_set_dma_mask;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
pr_err("set_consistent_dma_mask(64) failed\n");
goto err_set_dma_mask;

View File

@ -41,7 +41,6 @@
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64
/* Number of bytes allowed on the internal guest Rx queue. */
@ -525,8 +524,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->features = dev->hw_features | NETIF_F_RXCSUM;
dev->ethtool_ops = &xenvif_ethtool_ops;
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

View File

@ -379,7 +379,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = ks_dw_pcie_link_up,
};
static int __exit ks_pcie_remove(struct platform_device *pdev)
static int ks_pcie_remove(struct platform_device *pdev)
{
struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
@ -388,7 +388,7 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
return 0;
}
static int __init ks_pcie_probe(struct platform_device *pdev)
static int ks_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
@ -455,9 +455,9 @@ fail_clk:
return ret;
}
static struct platform_driver ks_pcie_driver __refdata = {
static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
.remove = __exit_p(ks_pcie_remove),
.remove = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
.of_match_table = of_match_ptr(ks_pcie_of_match),

View File

@ -730,6 +730,8 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
if (IS_ERR(res->phy_ahb_reset))
return PTR_ERR(res->phy_ahb_reset);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}

View File

@ -114,6 +114,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
/* Use special handling for Pin0 debounce */
if (offset == 0) {
pin_reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
if (pin_reg & INTERNAL_GPIO0_DEBOUNCE)
debounce = 0;
}
pin_reg = readl(gpio_dev->base + offset * 4);
if (debounce) {
@ -191,6 +199,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *output_value;
char *output_enable;
seq_printf(s, "WAKE_INT_MASTER_REG: 0x%08x\n", readl(gpio_dev->base + WAKE_INT_MASTER_REG));
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
seq_printf(s, "GPIO bank%d\t", bank);

View File

@ -21,6 +21,7 @@
#define AMD_GPIO_PINS_BANK3 32
#define WAKE_INT_MASTER_REG 0xfc
#define INTERNAL_GPIO0_DEBOUNCE (1 << 15)
#define EOI_MASK (1 << 29)
#define WAKE_INT_STATUS_REG0 0x2f8

View File

@ -899,6 +899,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = {
}
};
/*
* This lock class allows to tell lockdep that parent IRQ and children IRQ do
* not share the same class so it does not raise false positive
*/
static struct lock_class_key atmel_lock_key;
static struct lock_class_key atmel_request_key;
static int atmel_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@ -1044,6 +1051,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
handle_simple_irq);
irq_set_chip_data(irq, atmel_pioctrl);
irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key);
dev_dbg(dev,
"atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
i, irq);

View File

@ -346,7 +346,8 @@ ssize_t ptp_read(struct posix_clock *pc,
for (i = 0; i < cnt; i++) {
event[i] = queue->buf[queue->head];
queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
/* Paired with READ_ONCE() in queue_cnt() */
WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
}
spin_unlock_irqrestore(&queue->lock, flags);

View File

@ -68,10 +68,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
dst->t.sec = seconds;
dst->t.nsec = remainder;
/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
if (!queue_free(queue))
queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
spin_unlock_irqrestore(&queue->lock, flags);
}

View File

@ -68,9 +68,13 @@ struct ptp_clock {
* that a writer might concurrently increment the tail does not
* matter, since the queue remains nonempty nonetheless.
*/
static inline int queue_cnt(struct timestamp_event_queue *q)
static inline int queue_cnt(const struct timestamp_event_queue *q)
{
int cnt = q->tail - q->head;
/*
* Paired with WRITE_ONCE() in enqueue_external_timestamp(),
* ptp_read(), extts_fifo_show().
*/
int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
}

View File

@ -91,7 +91,8 @@ static ssize_t extts_fifo_show(struct device *dev,
qcnt = queue_cnt(queue);
if (qcnt) {
event = queue->buf[queue->head];
queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
/* Paired with READ_ONCE() in queue_cnt() */
WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
}
spin_unlock_irqrestore(&queue->lock, flags);

View File

@ -83,6 +83,7 @@ struct sti_pwm_compat_data {
unsigned int cpt_num_devs;
unsigned int max_pwm_cnt;
unsigned int max_prescale;
struct sti_cpt_ddata *ddata;
};
struct sti_pwm_chip {
@ -318,7 +319,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
struct sti_pwm_compat_data *cdata = pc->cdata;
struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
struct device *dev = pc->dev;
unsigned int effective_ticks;
unsigned long long high, low;
@ -421,7 +422,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
while (cpt_int_stat) {
devicenum = ffs(cpt_int_stat) - 1;
ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
ddata = &pc->cdata->ddata[devicenum];
/*
* Capture input:
@ -599,43 +600,50 @@ static int sti_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
if (!cdata->pwm_num_devs)
goto skip_pwm;
if (cdata->pwm_num_devs) {
pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
if (IS_ERR(pc->pwm_clk)) {
dev_err(dev, "failed to get PWM clock\n");
return PTR_ERR(pc->pwm_clk);
}
pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
if (IS_ERR(pc->pwm_clk)) {
dev_err(dev, "failed to get PWM clock\n");
return PTR_ERR(pc->pwm_clk);
ret = clk_prepare(pc->pwm_clk);
if (ret) {
dev_err(dev, "failed to prepare clock\n");
return ret;
}
}
ret = clk_prepare(pc->pwm_clk);
if (ret) {
dev_err(dev, "failed to prepare clock\n");
return ret;
if (cdata->cpt_num_devs) {
pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
if (IS_ERR(pc->cpt_clk)) {
dev_err(dev, "failed to get PWM capture clock\n");
return PTR_ERR(pc->cpt_clk);
}
ret = clk_prepare(pc->cpt_clk);
if (ret) {
dev_err(dev, "failed to prepare clock\n");
return ret;
}
cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
if (!cdata->ddata)
return -ENOMEM;
}
skip_pwm:
if (!cdata->cpt_num_devs)
goto skip_cpt;
pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
if (IS_ERR(pc->cpt_clk)) {
dev_err(dev, "failed to get PWM capture clock\n");
return PTR_ERR(pc->cpt_clk);
}
ret = clk_prepare(pc->cpt_clk);
if (ret) {
dev_err(dev, "failed to prepare clock\n");
return ret;
}
skip_cpt:
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
pc->chip.base = -1;
pc->chip.npwm = pc->cdata->pwm_num_devs;
for (i = 0; i < cdata->cpt_num_devs; i++) {
struct sti_cpt_ddata *ddata = &cdata->ddata[i];
init_waitqueue_head(&ddata->wait);
mutex_init(&ddata->lock);
}
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
clk_unprepare(pc->pwm_clk);
@ -643,19 +651,6 @@ skip_cpt:
return ret;
}
for (i = 0; i < cdata->cpt_num_devs; i++) {
struct sti_cpt_ddata *ddata;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
init_waitqueue_head(&ddata->wait);
mutex_init(&ddata->lock);
pwm_set_chip_data(&pc->chip.pwms[i], ddata);
}
platform_set_drvdata(pdev, pc);
return 0;

View File

@ -429,6 +429,9 @@ static void __reset_control_put_internal(struct reset_control *rstc)
{
lockdep_assert_held(&reset_list_mutex);
if (IS_ERR_OR_NULL(rstc))
return;
kref_put(&rstc->refcnt, __reset_control_release);
}

View File

@ -449,6 +449,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
set_worker_desc("zgidpn%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
ret = zfcp_fc_ns_gid_pn(port);
if (ret) {
/* could not issue gid_pn for some reason */
@ -534,8 +535,7 @@ static void zfcp_fc_adisc_handler(void *data)
/* re-init to undo drop from zfcp_fc_adisc() */
port->d_id = ntoh24(adisc_resp->adisc_port_id);
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
/* port is still good, nothing to do */
out:
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
@ -594,9 +594,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
/* only issue one test command at one time per port */
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)

View File

@ -708,6 +708,9 @@ void zfcp_scsi_rport_work(struct work_struct *work)
struct zfcp_port *port = container_of(work, struct zfcp_port,
rport_work);
set_worker_desc("zrp%c-%16llx",
(port->rport_task == RPORT_ADD) ? 'a' : 'd',
port->wwpn); /* < WORKER_DESC_LEN=24 */
while (port->rport_task) {
if (port->rport_task == RPORT_ADD) {
port->rport_task = RPORT_NONE;

View File

@ -435,8 +435,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fc_frame_header *fh;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
struct sk_buff *tmp_skb;
unsigned short oxid;
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
@ -448,11 +446,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
goto err;
}
tmp_skb = skb_share_check(skb, GFP_ATOMIC);
if (!tmp_skb)
goto err;
skb = tmp_skb;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return -1;
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
@ -470,8 +466,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
fh = (struct fc_frame_header *) skb_transport_header(skb);
oxid = ntohs(fh->fh_ox_id);
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;

View File

@ -13787,9 +13787,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
* Process all the event on FCP fast-path EQ
*/
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
if (eqe == NULL)
break;
ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
if (!(++ecount % fpeq->entry_repost) ||
ccount > LPFC_MAX_ISR_CQE)

View File

@ -2068,8 +2068,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
if (ql2xenabledif == 1)
prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION

View File

@ -22,7 +22,7 @@
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x302e,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |

View File

@ -144,6 +144,8 @@ extern int ql2xmvasynctoatio;
extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);

View File

@ -3315,6 +3315,12 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_4;
else
ha->fw_options[2] &= ~BIT_4;
/* Reserve 1/2 of emergency exchanges for ELS.*/
if (qla2xuseresexchforels)
ha->fw_options[2] |= BIT_8;
else
ha->fw_options[2] &= ~BIT_8;
}
ql_dbg(ql_dbg_init, vha, 0x00e8,

View File

@ -3490,11 +3490,14 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
!IS_QLA27XX(ha))
if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
!IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
!IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
goto skip_msi;
if (ql2xenablemsix == 2)
goto skip_msix;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
(ha->pdev->subsystem_device == 0x7040 ||
ha->pdev->subsystem_device == 0x7041 ||

View File

@ -262,6 +262,35 @@ MODULE_PARM_DESC(ql2xautodetectsfp,
"Detect SFP range and set appropriate distance.\n"
"1 (Default): Enable\n");
int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
"Set to enable MSI or MSI-X interrupt mechanism.\n"
" Default is 1, enable MSI-X interrupt mechanism.\n"
" 0 -- enable traditional pin-based mechanism.\n"
" 1 -- enable MSI-X interrupt mechanism.\n"
" 2 -- enable MSI interrupt mechanism.\n");
int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
"Reserve 1/2 of emergency exchanges for ELS.\n"
" 0 (default): disabled");
int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
"Override DIF/DIX protection capabilities mask\n"
"Default is 0 which sets protection mask based on "
"capabilities reported by HBA firmware.\n");
int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
" 0 -- Let HBA firmware decide\n"
" 1 -- Force T10 CRC\n"
" 2 -- Force IP checksum\n");
/*
* SCSI host template entry points
*/
@ -3015,6 +3044,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
if (ql2xenabledif && ql2xenabledif != 2) {
ql_log(ql_log_warn, base_vha, 0x302d,
"Invalid value for ql2xenabledif, resetting it to default (2)\n");
ql2xenabledif = 2;
}
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
@ -3243,15 +3279,16 @@ skip_dpc:
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
if (ql2xenabledif == 1)
prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION
| SHOST_DIX_TYPE2_PROTECTION
| SHOST_DIX_TYPE3_PROTECTION);
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else
scsi_host_set_prot(host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION
| SHOST_DIX_TYPE2_PROTECTION
| SHOST_DIX_TYPE3_PROTECTION);
guard = SHOST_DIX_GUARD_CRC;
@ -3259,7 +3296,10 @@ skip_dpc:
(ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
guard |= SHOST_DIX_GUARD_IP;
scsi_host_set_guard(host, guard);
if (ql2xprotguard)
scsi_host_set_guard(host, ql2xprotguard);
else
scsi_host_set_guard(host, guard);
} else
base_vha->flags.difdix_supported = 0;
}

View File

@ -51,11 +51,11 @@ static int ashmem_open(struct inode *inode, struct file *file)
int ret;
ret = generic_file_open(inode, file);
if (unlikely(ret))
if (ret)
return ret;
asma = kmem_cache_alloc(ashmem_area_cachep, GFP_KERNEL);
if (unlikely(!asma))
if (!asma)
return -ENOMEM;
*asma = (typeof(*asma)){
@ -198,7 +198,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
/* user needs to SET_SIZE before mapping */
size = READ_ONCE(asma->size);
if (unlikely(!size))
if (!size)
return -EINVAL;
/* requested mapping size larger than object size */
@ -207,8 +207,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
/* requested protection bits must match our allowed protection mask */
prot_mask = READ_ONCE(asma->prot_mask);
if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(prot_mask, 0)) &
calc_vm_prot_bits(PROT_MASK, 0)))
if ((vma->vm_flags & ~calc_vm_prot_bits(prot_mask, 0)) &
calc_vm_prot_bits(PROT_MASK, 0))
return -EPERM;
vma->vm_flags &= ~calc_vm_may_flags(~prot_mask);
@ -241,7 +241,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
/* the user can only remove, not add, protection bits */
if (unlikely((READ_ONCE(asma->prot_mask) & prot) != prot))
if ((READ_ONCE(asma->prot_mask) & prot) != prot)
return -EINVAL;
/* does the application expect PROT_READ to imply PROT_EXEC? */
@ -329,14 +329,14 @@ static int __init ashmem_init(void)
ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
sizeof(struct ashmem_area),
0, 0, NULL);
if (unlikely(!ashmem_area_cachep)) {
if (!ashmem_area_cachep) {
pr_err("failed to create slab cache\n");
ret = -ENOMEM;
goto out;
}
ret = misc_register(&ashmem_misc);
if (unlikely(ret)) {
if (ret) {
pr_err("failed to register misc device!\n");
goto out_free1;
}

View File

@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/usb/typec.h>
@ -56,7 +57,7 @@ static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc)
}
static int tcpci_read16(struct tcpci *tcpci, unsigned int reg,
unsigned int *val)
u16 *val)
{
return regmap_raw_read(tcpci->regmap, reg, val, sizeof(u16));
}
@ -299,15 +300,15 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc,
const struct pd_message *msg)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
unsigned int reg, cnt, header;
u16 header = msg ? le16_to_cpu(msg->header) : 0;
unsigned int reg, cnt;
int ret;
cnt = msg ? pd_header_cnt(msg->header) * 4 : 0;
cnt = msg ? pd_header_cnt(header) * 4 : 0;
ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
if (ret < 0)
return ret;
header = msg ? msg->header : 0;
ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
if (ret < 0)
return ret;
@ -346,6 +347,10 @@ static int tcpci_init(struct tcpc_dev *tcpc)
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
if (ret < 0)
return ret;
/* Clear all events */
ret = tcpci_write16(tcpci, TCPC_ALERT, 0xffff);
if (ret < 0)
@ -370,7 +375,7 @@ static int tcpci_init(struct tcpc_dev *tcpc)
static irqreturn_t tcpci_irq(int irq, void *dev_id)
{
struct tcpci *tcpci = dev_id;
unsigned int status, reg;
u16 status;
tcpci_read16(tcpci, TCPC_ALERT, &status);
@ -386,6 +391,8 @@ static irqreturn_t tcpci_irq(int irq, void *dev_id)
tcpm_cc_change(tcpci->port);
if (status & TCPC_ALERT_POWER_STATUS) {
unsigned int reg;
regmap_read(tcpci->regmap, TCPC_POWER_STATUS_MASK, &reg);
/*
@ -401,11 +408,12 @@ static irqreturn_t tcpci_irq(int irq, void *dev_id)
if (status & TCPC_ALERT_RX_STATUS) {
struct pd_message msg;
unsigned int cnt;
u16 header;
regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
tcpci_read16(tcpci, TCPC_RX_HDR, &reg);
msg.header = reg;
tcpci_read16(tcpci, TCPC_RX_HDR, &header);
msg.header = cpu_to_le16(header);
if (WARN_ON(cnt > sizeof(msg.payload)))
cnt = sizeof(msg.payload);
@ -451,6 +459,12 @@ static int tcpci_parse_config(struct tcpci *tcpci)
/* TODO: Populate struct tcpc_config from ACPI/device-tree */
tcpci->tcpc.config = &tcpci_tcpc_config;
tcpci->tcpc.fwnode = device_get_named_child_node(tcpci->dev,
"connector");
if (!tcpci->tcpc.fwnode) {
dev_err(tcpci->dev, "Can't find connector node.\n");
return -EINVAL;
}
return 0;
}

View File

@ -80,6 +80,7 @@
#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
#define TCPC_FAULT_STATUS 0x1f
#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
#define TCPC_COMMAND 0x23
#define TCPC_CMD_WAKE_I2C 0x11

View File

@ -3575,6 +3575,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
else
port->try_role = TYPEC_NO_PREFERRED_ROLE;
port->typec_caps.fwnode = tcpc->fwnode;
port->typec_caps.prefer_role = tcpc->config->default_role;
port->typec_caps.type = tcpc->config->type;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */

View File

@ -54,6 +54,27 @@ enum tcpm_transmit_type {
TCPC_TX_BIST_MODE_2 = 7
};
/**
* struct tcpc_config - Port configuration
* @src_pdo: PDO parameters sent to port partner as response to
* PD_CTRL_GET_SOURCE_CAP message
* @nr_src_pdo: Number of entries in @src_pdo
* @snk_pdo: PDO parameters sent to partner as response to
* PD_CTRL_GET_SINK_CAP message
* @nr_snk_pdo: Number of entries in @snk_pdo
* @max_snk_mv: Maximum acceptable sink voltage in mV
* @max_snk_ma: Maximum sink current in mA
* @max_snk_mw: Maximum required sink power in mW
* @operating_snk_mw:
* Required operating sink power in mW
* @type: Port type (TYPEC_PORT_DFP, TYPEC_PORT_UFP, or
* TYPEC_PORT_DRP)
* @default_role:
* Default port role (TYPEC_SINK or TYPEC_SOURCE).
* Set to TYPEC_NO_PREFERRED_ROLE if no default role.
* @try_role_hw:True if try.{Src,Snk} is implemented in hardware
* @alt_modes: List of supported alternate modes
*/
struct tcpc_config {
const u32 *src_pdo;
unsigned int nr_src_pdo;
@ -79,7 +100,6 @@ struct tcpc_config {
enum tcpc_usb_switch {
TCPC_USB_SWITCH_CONNECT,
TCPC_USB_SWITCH_DISCONNECT,
TCPC_USB_SWITCH_RESTORE, /* TODO FIXME */
};
/* Mux state attributes */
@ -104,17 +124,42 @@ struct tcpc_mux_dev {
void *priv_data;
};
/**
* struct tcpc_dev - Port configuration and callback functions
* @config: Pointer to port configuration
* @fwnode: Pointer to port fwnode
* @get_vbus: Called to read current VBUS state
* @get_current_limit:
* Optional; called by the tcpm core when configured as a snk
* and cc=Rp-def. This allows the tcpm to provide a fallback
* current-limit detection method for the cc=Rp-def case.
* For example, some tcpcs may include BC1.2 charger detection
* and use that in this case.
* @set_cc: Called to set value of CC pins
* @get_cc: Called to read current CC pin values
* @set_polarity:
* Called to set polarity
* @set_vconn: Called to enable or disable VCONN
* @set_vbus: Called to enable or disable VBUS
* @set_current_limit:
* Optional; called to set current limit as negotiated
* with partner.
* @set_pd_rx: Called to enable or disable reception of PD messages
* @set_roles: Called to set power and data roles
* @start_drp_toggling:
* Optional; if supported by hardware, called to start DRP
* toggling. DRP toggling is stopped automatically if
* a connection is established.
* @try_role: Optional; called to set a preferred role
* @pd_transmit:Called to transmit PD message
* @mux: Pointer to multiplexer data
*/
struct tcpc_dev {
const struct tcpc_config *config;
struct fwnode_handle *fwnode;
int (*init)(struct tcpc_dev *dev);
int (*get_vbus)(struct tcpc_dev *dev);
/*
* This optional callback gets called by the tcpm core when configured
* as a snk and cc=Rp-def. This allows the tcpm to provide a fallback
* current-limit detection method for the cc=Rp-def case. E.g. some
* tcpcs may include BC1.2 charger detection and use that in this case.
*/
int (*get_current_limit)(struct tcpc_dev *dev);
int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc);
int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1,

View File

@ -2142,6 +2142,8 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);

View File

@ -1279,6 +1279,13 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.type = PORT_SC16IS7XX;
s->p[i].port.fifosize = SC16IS7XX_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iobase = i;
/*
* Use all ones as membase to make sure uart_configure_port() in
* serial_core.c does not abort for SPI/I2C devices where the
* membase address is not applicable.
*/
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;

View File

@ -439,8 +439,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
temp = size;
size -= temp;
next += temp;
if (temp == size)
goto done;
}
temp = snprintf(next, size, "\n");
@ -450,7 +448,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
size -= temp;
next += temp;
done:
*sizep = size;
*nextp = next;
}

View File

@ -1869,9 +1869,8 @@ static void musb_pm_runtime_check_session(struct musb *musb)
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
musb->quirk_retries--;
break;
}
/* fall through */
break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,

View File

@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
{
u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
u32 bit_mask, end_mask, eorx, shift;
const char *s = image->data, *src;
u32 bit_mask, eorx, shift;
const u8 *s = image->data, *src;
u32 *dst;
const u32 *tab = NULL;
const u32 *tab;
size_t tablen;
u32 colortab[16];
int i, j, k;
switch (bpp) {
case 8:
tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
tablen = 16;
break;
case 16:
tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
tablen = 4;
break;
case 32:
default:
tab = cfb_tab32;
tablen = 2;
break;
default:
return;
}
for (i = ppw-1; i--; ) {
@ -218,20 +224,62 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
eorx = fgx ^ bgx;
k = image->width/ppw;
for (i = 0; i < tablen; ++i)
colortab[i] = (tab[i] & eorx) ^ bgx;
for (i = image->height; i--; ) {
dst = dst1;
shift = 8;
src = s;
for (j = k; j--; ) {
/*
* Manually unroll the per-line copying loop for better
* performance. This works until we processed the last
* completely filled source byte (inclusive).
*/
switch (ppw) {
case 4: /* 8 bpp */
for (j = k; j >= 2; j -= 2, ++src) {
*dst++ = colortab[(*src >> 4) & bit_mask];
*dst++ = colortab[(*src >> 0) & bit_mask];
}
break;
case 2: /* 16 bpp */
for (j = k; j >= 4; j -= 4, ++src) {
*dst++ = colortab[(*src >> 6) & bit_mask];
*dst++ = colortab[(*src >> 4) & bit_mask];
*dst++ = colortab[(*src >> 2) & bit_mask];
*dst++ = colortab[(*src >> 0) & bit_mask];
}
break;
case 1: /* 32 bpp */
for (j = k; j >= 8; j -= 8, ++src) {
*dst++ = colortab[(*src >> 7) & bit_mask];
*dst++ = colortab[(*src >> 6) & bit_mask];
*dst++ = colortab[(*src >> 5) & bit_mask];
*dst++ = colortab[(*src >> 4) & bit_mask];
*dst++ = colortab[(*src >> 3) & bit_mask];
*dst++ = colortab[(*src >> 2) & bit_mask];
*dst++ = colortab[(*src >> 1) & bit_mask];
*dst++ = colortab[(*src >> 0) & bit_mask];
}
break;
}
/*
* For image widths that are not a multiple of 8, there
* are trailing pixels left on the current line. Print
* them as well.
*/
for (; j--; ) {
shift -= ppw;
end_mask = tab[(*src >> shift) & bit_mask];
*dst++ = (end_mask & eorx) ^ bgx;
*dst++ = colortab[(*src >> shift) & bit_mask];
if (!shift) {
shift = 8;
src++;
++src;
}
}
dst1 += p->fix.line_length;
s += spitch;
}

View File

@ -237,7 +237,7 @@ struct sti_rom_font {
u8 height;
u8 font_type; /* language type */
u8 bytes_per_char;
u32 next_font;
s32 next_font; /* note: signed int */
u8 underline_height;
u8 underline_pos;
u8 res008[2];

View File

@ -497,7 +497,9 @@ static void lateeoi_list_add(struct irq_info *info)
spin_lock_irqsave(&eoi->eoi_list_lock, flags);
if (list_empty(&eoi->eoi_list)) {
elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
eoi_list);
if (!elem || info->eoi_time < elem->eoi_time) {
list_add(&info->eoi_list, &eoi->eoi_list);
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed, delay);

View File

@ -3537,6 +3537,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
if (ret) {
btrfs_tree_unlock(split);
free_extent_buffer(split);
btrfs_abort_transaction(trans, ret);
return ret;
}

View File

@ -1695,6 +1695,15 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
* are limited to own subvolumes only
*/
ret = -EPERM;
} else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) {
/*
* Snapshots must be made with the src_inode referring
* to the subvolume inode, otherwise the permission
* checking above is useless because we may have
* permission on a lower directory but not the subvol
* itself.
*/
ret = -EINVAL;
} else {
ret = btrfs_mksubvol(&file->f_path, name, namelen,
BTRFS_I(src_inode)->root,

View File

@ -717,8 +717,13 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH, true);
if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
btrfs_wait_for_commit(root->fs_info, 0);
if (trans == ERR_PTR(-ENOENT)) {
int ret;
ret = btrfs_wait_for_commit(root->fs_info, 0);
if (ret)
return ERR_PTR(ret);
}
return trans;
}

View File

@ -2081,7 +2081,7 @@ again:
dir_key->offset,
name, name_len, 0);
}
if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
if (!log_di || log_di == ERR_PTR(-ENOENT)) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
btrfs_release_path(path);
btrfs_release_path(log_path);
@ -5013,8 +5013,7 @@ again:
* we don't need to do more work nor fallback to
* a transaction commit.
*/
if (IS_ERR(other_inode) &&
PTR_ERR(other_inode) == -ENOENT) {
if (other_inode == ERR_PTR(-ENOENT)) {
goto next_key;
} else if (IS_ERR(other_inode)) {
err = PTR_ERR(other_inode);

Some files were not shown because too many files have changed in this diff Show More