Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/blacklist.c | 54
-rw-r--r-- drivers/acpi/device_pm.c | 12
-rw-r--r-- drivers/acpi/ec.c | 2
-rw-r--r-- drivers/acpi/fan.c | 8
-rw-r--r-- drivers/acpi/pci_irq.c | 11
-rw-r--r-- drivers/acpi/processor_core.c | 9
-rw-r--r-- drivers/acpi/resource.c | 2
-rw-r--r-- drivers/acpi/scan.c | 4
-rw-r--r-- drivers/acpi/utils.c | 12
-rw-r--r-- drivers/acpi/video.c | 10
-rw-r--r-- drivers/ata/Kconfig | 2
-rw-r--r-- drivers/base/power/opp.c | 78
-rw-r--r-- drivers/block/rbd.c | 11
-rw-r--r-- drivers/bluetooth/ath3k.c | 2
-rw-r--r-- drivers/bluetooth/btusb.c | 1
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 38
-rw-r--r-- drivers/cpufreq/longhaul.c | 4
-rw-r--r-- drivers/cpufreq/powernow-k6.c | 2
-rw-r--r-- drivers/cpufreq/powernow-k7.c | 3
-rw-r--r-- drivers/cpufreq/speedstep-ich.c | 3
-rw-r--r-- drivers/cpuidle/cpuidle-powernv.c | 10
-rw-r--r-- drivers/hwmon/lm75.c | 9
-rw-r--r-- drivers/hwmon/ntc_thermistor.c | 6
-rw-r--r-- drivers/hwmon/tmp102.c | 6
-rw-r--r-- drivers/i2c/busses/Kconfig | 11
-rw-r--r-- drivers/i2c/busses/Makefile | 1
-rw-r--r-- drivers/i2c/busses/i2c-opal.c | 294
-rw-r--r-- drivers/infiniband/Kconfig | 11
-rw-r--r-- drivers/infiniband/core/Makefile | 1
-rw-r--r-- drivers/infiniband/core/addr.c | 4
-rw-r--r-- drivers/infiniband/core/multicast.c | 11
-rw-r--r-- drivers/infiniband/core/umem.c | 72
-rw-r--r-- drivers/infiniband/core/umem_odp.c | 668
-rw-r--r-- drivers/infiniband/core/umem_rbtree.c | 94
-rw-r--r-- drivers/infiniband/core/uverbs.h | 1
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 171
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 5
-rw-r--r-- drivers/infiniband/core/verbs.c | 3
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_provider.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 7
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 28
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_mrmw.c | 2
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_mr.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/mr.c | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/Makefile | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 45
-rw-r--r-- drivers/infiniband/hw/mlx5/mem.c | 69
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 116
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 323
-rw-r--r-- drivers/infiniband/hw/mlx5/odp.c | 798
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 197
-rw-r--r-- drivers/infiniband/hw/nes/nes_verbs.c | 6
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 5
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/qib/qib_mr.c | 2
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 19
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 18
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 27
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 49
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 239
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 22
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 104
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.h | 30
-rw-r--r-- drivers/infiniband/ulp/iser/iser_initiator.c | 6
-rw-r--r-- drivers/infiniband/ulp/iser/iser_memory.c | 102
-rw-r--r-- drivers/infiniband/ulp/iser/iser_verbs.c | 91
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 1599
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.h | 80
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 3
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 10
-rw-r--r-- drivers/input/gameport/gameport.c | 4
-rw-r--r-- drivers/input/input.c | 4
-rw-r--r-- drivers/input/joystick/xpad.c | 8
-rw-r--r-- drivers/input/keyboard/Kconfig | 8
-rw-r--r-- drivers/input/keyboard/Makefile | 2
-rw-r--r-- drivers/input/keyboard/amikbd.c | 47
-rw-r--r-- drivers/input/keyboard/atkbd.c | 6
-rw-r--r-- drivers/input/keyboard/cap1106.c | 341
-rw-r--r-- drivers/input/keyboard/cap11xx.c | 376
-rw-r--r-- drivers/input/keyboard/gpio_keys.c | 37
-rw-r--r-- drivers/input/keyboard/lm8323.c | 2
-rw-r--r-- drivers/input/keyboard/lpc32xx-keys.c | 92
-rw-r--r-- drivers/input/keyboard/mpr121_touchkey.c | 42
-rw-r--r-- drivers/input/keyboard/pxa27x_keypad.c | 84
-rw-r--r-- drivers/input/misc/88pm860x_onkey.c | 6
-rw-r--r-- drivers/input/misc/ad714x-i2c.c | 6
-rw-r--r-- drivers/input/misc/ad714x-spi.c | 6
-rw-r--r-- drivers/input/misc/adxl34x-i2c.c | 6
-rw-r--r-- drivers/input/misc/adxl34x-spi.c | 6
-rw-r--r-- drivers/input/misc/drv260x.c | 6
-rw-r--r-- drivers/input/misc/drv2667.c | 6
-rw-r--r-- drivers/input/misc/gp2ap002a00f.c | 6
-rw-r--r-- drivers/input/misc/ims-pcu.c | 4
-rw-r--r-- drivers/input/misc/kxtj9.c | 6
-rw-r--r-- drivers/input/misc/max77693-haptic.c | 6
-rw-r--r-- drivers/input/misc/max8925_onkey.c | 6
-rw-r--r-- drivers/input/misc/max8997_haptic.c | 4
-rw-r--r-- drivers/input/misc/palmas-pwrbutton.c | 6
-rw-r--r-- drivers/input/misc/pm8xxx-vibrator.c | 4
-rw-r--r-- drivers/input/misc/pmic8xxx-pwrkey.c | 6
-rw-r--r-- drivers/input/misc/pwm-beeper.c | 6
-rw-r--r-- drivers/input/misc/sirfsoc-onkey.c | 4
-rw-r--r-- drivers/input/misc/twl4030-vibra.c | 6
-rw-r--r-- drivers/input/misc/twl6040-vibra.c | 4
-rw-r--r-- drivers/input/mouse/Kconfig | 30
-rw-r--r-- drivers/input/mouse/Makefile | 5
-rw-r--r-- drivers/input/mouse/cyapa.c | 289
-rw-r--r-- drivers/input/mouse/elan_i2c.h | 86
-rw-r--r-- drivers/input/mouse/elan_i2c_core.c | 1137
-rw-r--r-- drivers/input/mouse/elan_i2c_i2c.c | 611
-rw-r--r-- drivers/input/mouse/elan_i2c_smbus.c | 514
-rw-r--r-- drivers/input/mouse/lifebook.h | 6
-rw-r--r-- drivers/input/mouse/navpoint.c | 6
-rw-r--r-- drivers/input/mouse/synaptics_i2c.c | 6
-rw-r--r-- drivers/input/serio/altera_ps2.c | 81
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 10
-rw-r--r-- drivers/input/serio/serio.c | 4
-rw-r--r-- drivers/input/serio/serio_raw.c | 4
-rw-r--r-- drivers/input/touchscreen/Kconfig | 25
-rw-r--r-- drivers/input/touchscreen/Makefile | 2
-rw-r--r-- drivers/input/touchscreen/ad7877.c | 6
-rw-r--r-- drivers/input/touchscreen/ad7879.c | 6
-rw-r--r-- drivers/input/touchscreen/ads7846.c | 6
-rw-r--r-- drivers/input/touchscreen/atmel_mxt_ts.c | 6
-rw-r--r-- drivers/input/touchscreen/auo-pixcir-ts.c | 6
-rw-r--r-- drivers/input/touchscreen/cy8ctmg110_ts.c | 6
-rw-r--r-- drivers/input/touchscreen/cyttsp_core.c | 7
-rw-r--r-- drivers/input/touchscreen/edt-ft5x06.c | 6
-rw-r--r-- drivers/input/touchscreen/eeti_ts.c | 6
-rw-r--r-- drivers/input/touchscreen/egalax_ts.c | 6
-rw-r--r-- drivers/input/touchscreen/elants_i2c.c | 1271
-rw-r--r-- drivers/input/touchscreen/goodix.c | 395
-rw-r--r-- drivers/input/touchscreen/ili210x.c | 6
-rw-r--r-- drivers/input/touchscreen/ipaq-micro-ts.c | 6
-rw-r--r-- drivers/input/touchscreen/mms114.c | 6
-rw-r--r-- drivers/input/touchscreen/pixcir_i2c_ts.c | 6
-rw-r--r-- drivers/input/touchscreen/st1232.c | 7
-rw-r--r-- drivers/input/touchscreen/tsc2005.c | 6
-rw-r--r-- drivers/input/touchscreen/ucb1400_ts.c | 6
-rw-r--r-- drivers/input/touchscreen/wacom_i2c.c | 6
-rw-r--r-- drivers/input/touchscreen/zforce_ts.c | 6
-rw-r--r-- drivers/iommu/amd_iommu.c | 14
-rw-r--r-- drivers/iommu/intel_irq_remapping.c | 10
-rw-r--r-- drivers/iommu/irq_remapping.c | 6
-rw-r--r-- drivers/leds/leds-gpio.c | 2
-rw-r--r-- drivers/media/Kconfig | 1
-rw-r--r-- drivers/media/Makefile | 2
-rw-r--r-- drivers/media/i2c/Kconfig | 9
-rw-r--r-- drivers/media/i2c/Makefile | 1
-rw-r--r-- drivers/media/pci/cx88/cx88-blackbird.c | 4
-rw-r--r-- drivers/media/pci/cx88/cx88-dvb.c | 4
-rw-r--r-- drivers/media/pci/cx88/cx88-mpeg.c | 11
-rw-r--r-- drivers/media/pci/cx88/cx88-vbi.c | 9
-rw-r--r-- drivers/media/pci/cx88/cx88-video.c | 18
-rw-r--r-- drivers/media/pci/cx88/cx88.h | 2
-rw-r--r-- drivers/media/platform/Kconfig | 10
-rw-r--r-- drivers/media/platform/Makefile | 3
-rw-r--r-- drivers/media/platform/s5p-tv/Kconfig | 2
-rw-r--r-- drivers/media/platform/soc_camera/rcar_vin.c | 466
-rw-r--r-- drivers/media/platform/vivid/vivid-vid-out.c | 2
-rw-r--r-- drivers/media/usb/Kconfig | 1
-rw-r--r-- drivers/media/usb/Makefile | 1
-rw-r--r-- drivers/media/v4l2-core/v4l2-ioctl.c | 6
-rw-r--r-- drivers/memory/fsl_ifc.c | 13
-rw-r--r-- drivers/misc/cxl/context.c | 26
-rw-r--r-- drivers/misc/cxl/cxl.h | 9
-rw-r--r-- drivers/misc/cxl/file.c | 6
-rw-r--r-- drivers/misc/cxl/native.c | 12
-rw-r--r-- drivers/misc/cxl/pci.c | 2
-rw-r--r-- drivers/misc/cxl/sysfs.c | 10
-rw-r--r-- drivers/misc/mic/host/mic_debugfs.c | 18
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 2
-rw-r--r-- drivers/mtd/Kconfig | 2
-rw-r--r-- drivers/mtd/bcm47xxpart.c | 28
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 3
-rw-r--r-- drivers/mtd/devices/docg3.c | 122
-rw-r--r-- drivers/mtd/devices/m25p80.c | 30
-rw-r--r-- drivers/mtd/devices/mtd_dataflash.c | 6
-rw-r--r-- drivers/mtd/devices/phram.c | 2
-rw-r--r-- drivers/mtd/devices/pmc551.c | 3
-rw-r--r-- drivers/mtd/inftlmount.c | 2
-rw-r--r-- drivers/mtd/maps/bfin-async-flash.c | 1
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 8
-rw-r--r-- drivers/mtd/nand/Kconfig | 12
-rw-r--r-- drivers/mtd/nand/Makefile | 1
-rw-r--r-- drivers/mtd/nand/atmel_nand.c | 120
-rw-r--r-- drivers/mtd/nand/atmel_nand_ecc.h | 4
-rw-r--r-- drivers/mtd/nand/cafe_nand.c | 45
-rw-r--r-- drivers/mtd/nand/fsl_ifc_nand.c | 10
-rw-r--r-- drivers/mtd/nand/gpio.c | 4
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 153
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 201
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 6
-rw-r--r-- drivers/mtd/nand/mxc_nand.c | 10
-rw-r--r-- drivers/mtd/nand/nand_base.c | 12
-rw-r--r-- drivers/mtd/nand/nand_ids.c | 1
-rw-r--r-- drivers/mtd/nand/nandsim.c | 42
-rw-r--r-- drivers/mtd/nand/omap2.c | 24
-rw-r--r-- drivers/mtd/nand/orion_nand.c | 39
-rw-r--r-- drivers/mtd/nand/sunxi_nand.c | 1432
-rw-r--r-- drivers/mtd/spi-nor/fsl-quadspi.c | 23
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 313
-rw-r--r-- drivers/mtd/tests/oobtest.c | 77
-rw-r--r-- drivers/mtd/tests/torturetest.c | 4
-rw-r--r-- drivers/net/dsa/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 4
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 25
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 2
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 27
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 19
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 68
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 119
-rw-r--r-- drivers/net/ethernet/smsc/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 10
-rw-r--r-- drivers/net/macvtap.c | 30
-rw-r--r-- drivers/net/phy/Kconfig | 4
-rw-r--r-- drivers/net/phy/Makefile | 2
-rw-r--r-- drivers/net/phy/fixed_phy.c (renamed from drivers/net/phy/fixed.c) | 0
-rw-r--r-- drivers/net/tun.c | 26
-rw-r--r-- drivers/net/wireless/brcm80211/brcmsmac/main.c | 2
-rw-r--r-- drivers/net/wireless/hostap/hostap_cs.c | 15
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8821ae/dm.c | 11
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_chip.c | 6
-rw-r--r-- drivers/net/xen-netback/common.h | 4
-rw-r--r-- drivers/net/xen-netback/interface.c | 4
-rw-r--r-- drivers/net/xen-netback/netback.c | 27
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 12
-rw-r--r-- drivers/net/xen-netfront.c | 11
-rw-r--r-- drivers/nfc/trf7970a.c | 2
-rw-r--r-- drivers/pci/Kconfig | 9
-rw-r--r-- drivers/pci/Makefile | 2
-rw-r--r-- drivers/pci/hotplug/ibmphp_core.c | 6
-rw-r--r-- drivers/pci/ioapic.c | 121
-rw-r--r-- drivers/phy/phy-omap-usb2.c | 2
-rw-r--r-- drivers/phy/phy-ti-pipe3.c | 2
-rw-r--r-- drivers/platform/x86/Kconfig | 7
-rw-r--r-- drivers/platform/x86/acerhdf.c | 265
-rw-r--r-- drivers/platform/x86/asus-laptop.c | 3
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 9
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 3
-rw-r--r-- drivers/platform/x86/dell-laptop.c | 1057
-rw-r--r-- drivers/platform/x86/dell-smo8800.c | 10
-rw-r--r-- drivers/platform/x86/dell-wmi.c | 176
-rw-r--r-- drivers/platform/x86/eeepc-laptop.c | 213
-rw-r--r-- drivers/platform/x86/fujitsu-laptop.c | 6
-rw-r--r-- drivers/platform/x86/hp-wireless.c | 3
-rw-r--r-- drivers/platform/x86/hp_accel.c | 1
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 3
-rw-r--r-- drivers/platform/x86/intel_ips.c | 2
-rw-r--r-- drivers/platform/x86/intel_oaktrail.c | 3
-rw-r--r-- drivers/platform/x86/msi-laptop.c | 2
-rw-r--r-- drivers/platform/x86/msi-wmi.c | 3
-rw-r--r-- drivers/platform/x86/sony-laptop.c | 6
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 116
-rw-r--r-- drivers/platform/x86/toshiba_acpi.c | 166
-rw-r--r-- drivers/power/pm2301_charger.c | 4
-rw-r--r-- drivers/pwm/Kconfig | 22
-rw-r--r-- drivers/pwm/Makefile | 2
-rw-r--r-- drivers/pwm/pwm-atmel-hlcdc.c | 299
-rw-r--r-- drivers/pwm/pwm-bcm2835.c | 205
-rw-r--r-- drivers/pwm/pwm-fsl-ftm.c | 64
-rw-r--r-- drivers/scsi/53c700.c | 41
-rw-r--r-- drivers/scsi/Kconfig | 17
-rw-r--r-- drivers/scsi/advansys.c | 8
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 1
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 6
-rw-r--r-- drivers/scsi/csiostor/csio_scsi.c | 5
-rw-r--r-- drivers/scsi/esas2r/esas2r_flash.c | 4
-rw-r--r-- drivers/scsi/esas2r/esas2r_main.c | 1
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 1
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 1
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r-- drivers/scsi/ipr.c | 116
-rw-r--r-- drivers/scsi/ipr.h | 4
-rw-r--r-- drivers/scsi/isci/init.c | 1
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_transport.c | 5
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 1
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 5
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 1
-rw-r--r-- drivers/scsi/pmcraid.c | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 12
-rw-r--r-- drivers/scsi/scsi.c | 22
-rw-r--r-- drivers/scsi/scsi_debug.c | 62
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 1
-rw-r--r-- drivers/scsi/scsi_lib.c | 4
-rw-r--r-- drivers/scsi/scsi_pm.c | 10
-rw-r--r-- drivers/scsi/scsi_priv.h | 5
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 30
-rw-r--r-- drivers/scsi/scsi_transport_spi.c | 2
-rw-r--r-- drivers/scsi/storvsc_drv.c | 7
-rw-r--r-- drivers/scsi/ufs/ufshcd-pci.c | 11
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 11
-rw-r--r-- drivers/spi/spi-coldfire-qspi.c | 2
-rw-r--r-- drivers/spi/spi-img-spfi.c | 4
-rw-r--r-- drivers/spi/spi-meson-spifc.c | 4
-rw-r--r-- drivers/spi/spi-orion.c | 2
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 2
-rw-r--r-- drivers/spi/spi-qup.c | 4
-rw-r--r-- drivers/spi/spi-rockchip.c | 4
-rw-r--r-- drivers/spi/spi-s3c64xx.c | 4
-rw-r--r-- drivers/staging/gdm72xx/Kconfig | 2
-rw-r--r-- drivers/staging/lustre/lustre/include/linux/lustre_compat25.h | 24
-rw-r--r-- drivers/staging/lustre/lustre/llite/dir.c | 2
-rw-r--r-- drivers/staging/lustre/lustre/llite/llite_lib.c | 17
-rw-r--r-- drivers/staging/media/Kconfig | 6
-rw-r--r-- drivers/staging/media/Makefile | 3
-rw-r--r-- drivers/staging/media/parport/Kconfig (renamed from drivers/media/parport/Kconfig) | 24
-rw-r--r-- drivers/staging/media/parport/Makefile (renamed from drivers/media/parport/Makefile) | 0
-rw-r--r-- drivers/staging/media/parport/bw-qcam.c (renamed from drivers/media/parport/bw-qcam.c) | 0
-rw-r--r-- drivers/staging/media/parport/c-qcam.c (renamed from drivers/media/parport/c-qcam.c) | 0
-rw-r--r-- drivers/staging/media/parport/pms.c (renamed from drivers/media/parport/pms.c) | 0
-rw-r--r-- drivers/staging/media/parport/w9966.c (renamed from drivers/media/parport/w9966.c) | 0
-rw-r--r-- drivers/staging/media/tlg2300/Kconfig (renamed from drivers/media/usb/tlg2300/Kconfig) | 6
-rw-r--r-- drivers/staging/media/tlg2300/Makefile (renamed from drivers/media/usb/tlg2300/Makefile) | 0
-rw-r--r-- drivers/staging/media/tlg2300/pd-alsa.c (renamed from drivers/media/usb/tlg2300/pd-alsa.c) | 0
-rw-r--r-- drivers/staging/media/tlg2300/pd-common.h (renamed from drivers/media/usb/tlg2300/pd-common.h) | 0
-rw-r--r-- drivers/staging/media/tlg2300/pd-dvb.c (renamed from drivers/media/usb/tlg2300/pd-dvb.c) | 0
-rw-r--r-- drivers/staging/media/tlg2300/pd-main.c (renamed from drivers/media/usb/tlg2300/pd-main.c) | 0
-rw-r--r-- drivers/staging/media/tlg2300/pd-radio.c (renamed from drivers/media/usb/tlg2300/pd-radio.c) | 0
-rw-r--r-- drivers/staging/media/tlg2300/pd-video.c (renamed from drivers/media/usb/tlg2300/pd-video.c) | 0
-rw-r--r-- drivers/staging/media/tlg2300/vendorcmds.h (renamed from drivers/media/usb/tlg2300/vendorcmds.h) | 0
-rw-r--r-- drivers/staging/media/vino/Kconfig | 24
-rw-r--r-- drivers/staging/media/vino/Makefile | 3
-rw-r--r-- drivers/staging/media/vino/indycam.c (renamed from drivers/media/platform/indycam.c) | 0
-rw-r--r-- drivers/staging/media/vino/indycam.h (renamed from drivers/media/platform/indycam.h) | 0
-rw-r--r-- drivers/staging/media/vino/saa7191.c (renamed from drivers/media/i2c/saa7191.c) | 0
-rw-r--r-- drivers/staging/media/vino/saa7191.h (renamed from drivers/media/i2c/saa7191.h) | 0
-rw-r--r-- drivers/staging/media/vino/vino.c (renamed from drivers/media/platform/vino.c) | 0
-rw-r--r-- drivers/staging/media/vino/vino.h (renamed from drivers/media/platform/vino.h) | 0
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 15
-rw-r--r-- drivers/target/iscsi/iscsi_target_core.h | 1
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 11
-rw-r--r-- drivers/target/iscsi/iscsi_target_tpg.c | 1
-rw-r--r-- drivers/target/iscsi/iscsi_target_transport.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target_util.c | 26
-rw-r--r-- drivers/target/loopback/tcm_loop.c | 71
-rw-r--r-- drivers/target/loopback/tcm_loop.h | 7
-rw-r--r-- drivers/target/sbp/sbp_target.c | 2
-rw-r--r-- drivers/target/target_core_configfs.c | 344
-rw-r--r-- drivers/target/target_core_device.c | 90
-rw-r--r-- drivers/target/target_core_file.c | 42
-rw-r--r-- drivers/target/target_core_hba.c | 7
-rw-r--r-- drivers/target/target_core_iblock.c | 42
-rw-r--r-- drivers/target/target_core_internal.h | 28
-rw-r--r-- drivers/target/target_core_pr.c | 125
-rw-r--r-- drivers/target/target_core_pscsi.c | 28
-rw-r--r-- drivers/target/target_core_rd.c | 41
-rw-r--r-- drivers/target/target_core_sbc.c | 2
-rw-r--r-- drivers/target/target_core_spc.c | 4
-rw-r--r-- drivers/target/target_core_transport.c | 16
-rw-r--r-- drivers/target/target_core_user.c | 42
-rw-r--r-- drivers/target/tcm_fc/tfc_cmd.c | 8
-rw-r--r-- drivers/thermal/Kconfig | 32
-rw-r--r-- drivers/thermal/Makefile | 5
-rw-r--r-- drivers/thermal/armada_thermal.c | 20
-rw-r--r-- drivers/thermal/clock_cooling.c | 485
-rw-r--r-- drivers/thermal/int340x_thermal/acpi_thermal_rel.c | 12
-rw-r--r-- drivers/thermal/int340x_thermal/int3400_thermal.c | 80
-rw-r--r-- drivers/thermal/int340x_thermal/int3403_thermal.c | 4
-rw-r--r-- drivers/thermal/intel_powerclamp.c | 3
-rw-r--r-- drivers/thermal/intel_soc_dts_thermal.c | 12
-rw-r--r-- drivers/thermal/of-thermal.c | 148
-rw-r--r-- drivers/thermal/rockchip_thermal.c | 693
-rw-r--r-- drivers/thermal/samsung/exynos_thermal_common.h | 1
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.c | 692
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.h | 123
-rw-r--r-- drivers/thermal/samsung/exynos_tmu_data.c | 239
-rw-r--r-- drivers/thermal/samsung/exynos_tmu_data.h | 159
-rw-r--r-- drivers/thermal/tegra_soctherm.c | 476
-rw-r--r-- drivers/thermal/thermal_core.c | 8
-rw-r--r-- drivers/thermal/thermal_core.h | 18
-rw-r--r-- drivers/thermal/ti-soc-thermal/ti-thermal-common.c | 8
-rw-r--r-- drivers/tty/serial/8250/8250_dw.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_mtk.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_omap.c | 14
-rw-r--r-- drivers/tty/serial/mfd.c | 7
-rw-r--r-- drivers/tty/serial/msm_serial_hs.c | 2
-rw-r--r-- drivers/tty/serial/omap-serial.c | 2
-rw-r--r-- drivers/usb/core/Kconfig | 2
-rw-r--r-- drivers/usb/gadget/legacy/tcm_usb_gadget.c | 10
-rw-r--r-- drivers/usb/host/isp1760-hcd.c | 2
-rw-r--r-- drivers/usb/host/oxu210hp-hcd.c | 2
-rw-r--r-- drivers/usb/phy/Kconfig | 4
-rw-r--r-- drivers/usb/storage/Kconfig | 2
-rw-r--r-- drivers/vfio/Kconfig | 2
-rw-r--r-- drivers/vfio/pci/Kconfig | 8
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 5
-rw-r--r-- drivers/vfio/pci/vfio_pci_config.c | 7
-rw-r--r-- drivers/vhost/vringh.c | 125
-rw-r--r-- drivers/video/fbdev/s3c-fb.c | 2
-rw-r--r-- drivers/video/fbdev/sh_mobile_meram.c | 4
-rw-r--r-- drivers/virtio/virtio.c | 37
-rw-r--r-- drivers/virtio/virtio_balloon.c | 57
-rw-r--r-- drivers/virtio/virtio_pci_common.c | 39
-rw-r--r-- drivers/virtio/virtio_pci_common.h | 7
-rw-r--r-- drivers/virtio/virtio_pci_legacy.c | 24
-rw-r--r-- drivers/watchdog/imx2_wdt.c | 47
-rw-r--r-- drivers/xen/xen-scsiback.c | 2
419 files changed, 18047 insertions, 5277 deletions
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 7556e7c4a055..9b693d54c743 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -305,60 +305,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
*/
/*
- * Lenovo has a mix of systems OSI(Linux) situations
- * and thus we can not wildcard the vendor.
- *
- * _OSI(Linux) helps sound
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
- * T400, T500
- * _OSI(Linux) has Linux specific hooks
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
- * _OSI(Linux) is a NOP:
- * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
- * DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"),
- */
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad R61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad X61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T400",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T400"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T500",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
- },
- },
- /*
* Without this, this EEEpc exports a non-working WMI interface; with
* this it exports a working "good old" eeepc_laptop interface, fixing
* both brightness control and rfkill.
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 897640188acd..c2daa85fc9f7 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -680,13 +680,21 @@ static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
if (error)
return error;
+ if (adev->wakeup.flags.enabled)
+ return 0;
+
res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- if (ACPI_FAILURE(res)) {
+ if (ACPI_SUCCESS(res)) {
+ adev->wakeup.flags.enabled = 1;
+ } else {
acpi_disable_wakeup_device_power(adev);
return -EIO;
}
} else {
- acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (adev->wakeup.flags.enabled) {
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ adev->wakeup.flags.enabled = 0;
+ }
acpi_disable_wakeup_device_power(adev);
}
return 0;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5f9b74b9b71f..1b5853f384e2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -844,6 +844,8 @@ static int ec_install_handlers(struct acpi_ec *ec)
static void ec_remove_handlers(struct acpi_ec *ec)
{
+ if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
+ return;
acpi_disable_gpe(NULL, ec->gpe);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index caf9b76b7ef8..7a36f02598a6 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -325,6 +325,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct thermal_cooling_device *cdev;
struct acpi_fan *fan;
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ char *name;
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
@@ -346,7 +347,12 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
}
- cdev = thermal_cooling_device_register("Fan", device,
+ if (!strncmp(pdev->name, "PNP0C0B", strlen("PNP0C0B")))
+ name = "Fan";
+ else
+ name = acpi_device_bid(device);
+
+ cdev = thermal_cooling_device_register(name, device,
&fan_cooling_ops);
if (IS_ERR(cdev)) {
result = PTR_ERR(cdev);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 7cc4e33179f9..5277a0ee5704 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -413,6 +413,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
+ if (dev->irq_managed && dev->irq > 0)
+ return 0;
+
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry) {
/*
@@ -456,6 +459,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return rc;
}
dev->irq = rc;
+ dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -478,7 +482,7 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
u8 pin;
pin = dev->pin;
- if (!pin)
+ if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
/* Keep IOAPIC pin configuration when suspending */
@@ -506,6 +510,9 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
*/
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
- if (gsi >= 0 && dev->irq > 0)
+ if (gsi >= 0) {
acpi_unregister_gsi(gsi);
+ dev->irq = 0;
+ dev->irq_managed = 0;
+ }
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index ef58f46c8442..342942f90a10 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -125,13 +125,12 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
}
header = (struct acpi_subtable_header *)obj->buffer.pointer;
- if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
+ if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
map_lapic_id(header, acpi_id, &apic_id);
- } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
+ else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
map_lsapic_id(header, type, acpi_id, &apic_id);
- } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
+ else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
map_x2apic_id(header, type, acpi_id, &apic_id);
- }
exit:
kfree(buffer.pointer);
@@ -164,7 +163,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
* For example,
*
* Scope (_PR)
- * {
+ * {
* Processor (CPU0, 0x00, 0x00000410, 0x06) {}
* Processor (CPU1, 0x01, 0x00000410, 0x06) {}
* Processor (CPU2, 0x02, 0x00000410, 0x06) {}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 2ba8f02ced36..782a0d15c25f 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -200,7 +200,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
status = acpi_resource_to_address64(ares, &addr);
if (ACPI_FAILURE(status))
- return true;
+ return false;
res->start = addr.minimum;
res->end = addr.maximum;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1b1cf558d3d3..16914cc30882 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2214,7 +2214,7 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
&dep_devices);
if (ACPI_FAILURE(status)) {
- dev_err(&adev->dev, "Failed to evaluate _DEP.\n");
+ dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
return;
}
@@ -2224,7 +2224,7 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
status = acpi_get_object_info(dep_devices.handles[i], &info);
if (ACPI_FAILURE(status)) {
- dev_err(&adev->dev, "Error reading device info\n");
+ dev_dbg(&adev->dev, "Error reading _DEP device info\n");
continue;
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index dd8ff63ee2b4..cd49a3982b6a 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -346,22 +346,16 @@ acpi_evaluate_reference(acpi_handle handle,
package = buffer.pointer;
if ((buffer.length == 0) || !package) {
- printk(KERN_ERR PREFIX "No return object (len %X ptr %p)\n",
- (unsigned)buffer.length, package);
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
}
if (package->type != ACPI_TYPE_PACKAGE) {
- printk(KERN_ERR PREFIX "Expecting a [Package], found type %X\n",
- package->type);
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
}
if (!package->package.count) {
- printk(KERN_ERR PREFIX "[Package] has zero elements (%p)\n",
- package);
status = AE_BAD_DATA;
acpi_util_eval_error(handle, pathname, status);
goto end;
@@ -380,17 +374,13 @@ acpi_evaluate_reference(acpi_handle handle,
if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
status = AE_BAD_DATA;
- printk(KERN_ERR PREFIX
- "Expecting a [Reference] package element, found type %X\n",
- element->type);
acpi_util_eval_error(handle, pathname, status);
break;
}
if (!element->reference.handle) {
- printk(KERN_WARNING PREFIX "Invalid reference in"
- " package %s\n", pathname);
status = AE_NULL_ENTRY;
+ acpi_util_eval_error(handle, pathname, status);
break;
}
/* Get the acpi_handle. */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 185a57d13723..1eaadff2e198 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -155,6 +155,7 @@ struct acpi_video_bus {
u8 dos_setting;
struct acpi_video_enumerated_device *attached_array;
u8 attached_count;
+ u8 child_count;
struct acpi_video_bus_cap cap;
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
@@ -1159,8 +1160,12 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
struct acpi_video_bus *video = device->video;
int i;
- /* If we have a broken _DOD, no need to test */
- if (!video->attached_count)
+ /*
+ * If we have a broken _DOD or we have more than 8 output devices
+ * under the graphics controller node that we can't properly deal with
+ * in the operation region code currently, there is no need to test.
+ */
+ if (!video->attached_count || video->child_count > 8)
return true;
for (i = 0; i < video->attached_count; i++) {
@@ -1413,6 +1418,7 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
dev_err(&dev->dev, "Can't attach device\n");
break;
}
+ video->child_count++;
}
return status;
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cd4cccbfd2ab..a3a13605a9c4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -61,7 +61,7 @@ config ATA_ACPI
config SATA_ZPODD
bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
- depends on ATA_ACPI && PM_RUNTIME
+ depends on ATA_ACPI && PM
default n
help
This option adds support for SATA Zero Power Optical Disc
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 2d195f3a1998..d24dd614a0bd 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -84,7 +84,11 @@ struct dev_pm_opp {
*
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
- * meant for book keeping and private to OPP library
+ * meant for book keeping and private to OPP library.
+ *
+ * Because the opp structures can be used from both rcu and srcu readers, we
+ * need to wait for the grace period of both of them before freeing any
+ * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
*/
struct device_opp {
struct list_head node;
@@ -382,12 +386,34 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
+static struct device_opp *add_device_opp(struct device *dev)
+{
+ struct device_opp *dev_opp;
+
+ /*
+ * Allocate a new device OPP table. In the infrequent case where a
+ * new device needs to be added, we pay this penalty.
+ */
+ dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
+ if (!dev_opp)
+ return NULL;
+
+ dev_opp->dev = dev;
+ srcu_init_notifier_head(&dev_opp->srcu_head);
+ INIT_LIST_HEAD(&dev_opp->opp_list);
+
+ /* Secure the device list modification */
+ list_add_rcu(&dev_opp->node, &dev_opp_list);
+ return dev_opp;
+}
+
static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
unsigned long u_volt, bool dynamic)
{
struct device_opp *dev_opp = NULL;
struct dev_pm_opp *opp, *new_opp;
struct list_head *head;
+ int ret;
/* allocate new OPP node */
new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
@@ -400,7 +426,6 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
mutex_lock(&dev_opp_list_lock);
/* populate the opp table */
- new_opp->dev_opp = dev_opp;
new_opp->rate = freq;
new_opp->u_volt = u_volt;
new_opp->available = true;
@@ -409,27 +434,12 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
- /*
- * Allocate a new device OPP table. In the infrequent case
- * where a new device is needed to be added, we pay this
- * penalty.
- */
- dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
+ dev_opp = add_device_opp(dev);
if (!dev_opp) {
- mutex_unlock(&dev_opp_list_lock);
- kfree(new_opp);
- dev_warn(dev,
- "%s: Unable to create device OPP structure\n",
- __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_opp;
}
- dev_opp->dev = dev;
- srcu_init_notifier_head(&dev_opp->srcu_head);
- INIT_LIST_HEAD(&dev_opp->opp_list);
-
- /* Secure the device list modification */
- list_add_rcu(&dev_opp->node, &dev_opp_list);
head = &dev_opp->opp_list;
goto list_add;
}
@@ -448,18 +458,17 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
/* Duplicate OPPs ? */
if (new_opp->rate == opp->rate) {
- int ret = opp->available && new_opp->u_volt == opp->u_volt ?
+ ret = opp->available && new_opp->u_volt == opp->u_volt ?
0 : -EEXIST;
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
__func__, opp->rate, opp->u_volt, opp->available,
new_opp->rate, new_opp->u_volt, new_opp->available);
- mutex_unlock(&dev_opp_list_lock);
- kfree(new_opp);
- return ret;
+ goto free_opp;
}
list_add:
+ new_opp->dev_opp = dev_opp;
list_add_rcu(&new_opp->node, head);
mutex_unlock(&dev_opp_list_lock);
@@ -469,6 +478,11 @@ list_add:
*/
srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
+
+free_opp:
+ mutex_unlock(&dev_opp_list_lock);
+ kfree(new_opp);
+ return ret;
}
/**
@@ -511,10 +525,11 @@ static void kfree_device_rcu(struct rcu_head *head)
{
struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
- kfree(device_opp);
+ kfree_rcu(device_opp, rcu_head);
}
-void __dev_pm_opp_remove(struct device_opp *dev_opp, struct dev_pm_opp *opp)
+static void __dev_pm_opp_remove(struct device_opp *dev_opp,
+ struct dev_pm_opp *opp)
{
/*
* Notify the changes in the availability of the operable
@@ -592,7 +607,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+ struct device_opp *dev_opp;
struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
@@ -606,12 +621,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
mutex_lock(&dev_opp_list_lock);
/* Find the device_opp */
- list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
- if (dev == tmp_dev_opp->dev) {
- dev_opp = tmp_dev_opp;
- break;
- }
- }
+ dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
r = PTR_ERR(dev_opp);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
@@ -768,7 +778,7 @@ EXPORT_SYMBOL_GPL(of_init_opp_table);
*/
void of_free_opp_table(struct device *dev)
{
- struct device_opp *dev_opp = find_device_opp(dev);
+ struct device_opp *dev_opp;
struct dev_pm_opp *opp, *tmp;
/* Check for existing list for 'dev' */
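
The comment added at the top of this hunk explains why device_opp is freed with kfree_rcu() from inside an SRCU callback: readers may hold either an RCU or an SRCU read lock, so both grace periods must elapse before the memory is reclaimed. A minimal stand-alone sketch of that chaining pattern (all demo_* names are hypothetical; this is not the OPP code itself):

/*
 * Illustrative only: free an object after both SRCU and RCU grace
 * periods have elapsed.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(demo_srcu);

struct demo_node {
	int payload;
	struct rcu_head rcu_head;
};

static void demo_srcu_free(struct rcu_head *head)
{
	struct demo_node *n = container_of(head, struct demo_node, rcu_head);

	/*
	 * The SRCU grace period has elapsed, so the rcu_head may be
	 * reused here to also wait out classic RCU readers.
	 */
	kfree_rcu(n, rcu_head);
}

static void demo_node_free(struct demo_node *n)
{
	/* First wait for SRCU readers, then (in the callback) for RCU. */
	call_srcu(&demo_srcu, &n->rcu_head, demo_srcu_free);
}
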
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 27b71a0b72d0..3ec85dfce124 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2370,8 +2370,12 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
opcode = CEPH_OSD_OP_READ;
}
- osd_req_op_extent_init(osd_request, num_ops, opcode, offset, length,
- 0, 0);
+ if (opcode == CEPH_OSD_OP_DELETE)
+ osd_req_op_init(osd_request, num_ops, opcode);
+ else
+ osd_req_op_extent_init(osd_request, num_ops, opcode,
+ offset, length, 0, 0);
+
if (obj_request->type == OBJ_REQUEST_BIO)
osd_req_op_extent_osd_data_bio(osd_request, num_ops,
obj_request->bio_list, length);
@@ -3405,8 +3409,7 @@ err_rq:
if (result)
rbd_warn(rbd_dev, "%s %llx at %llx result %d",
obj_op_name(op_type), length, offset, result);
- if (snapc)
- ceph_put_snap_context(snapc);
+ ceph_put_snap_context(snapc);
blk_end_request_all(rq, result);
}
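
The removal of the "if (snapc)" test above relies on ceph_put_snap_context() tolerating a NULL pointer, the usual convention for kernel put-style helpers. A generic sketch of that NULL-safe put pattern (struct demo_ctx and the function names are hypothetical):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_ctx {
	struct kref kref;
	/* ... payload ... */
};

static void demo_ctx_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_ctx, kref));
}

static void demo_put_ctx(struct demo_ctx *ctx)
{
	if (!ctx)
		return;		/* callers may pass NULL unconditionally */
	kref_put(&ctx->kref, demo_ctx_release);
}
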
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fce758896280..1ee27ac18de0 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3007) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
+ { USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0930, 0x0227) },
@@ -140,6 +141,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 31dd24ac9926..19cf2cf22e87 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -167,6 +167,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1405b393c93d..742eefba12c2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -199,7 +199,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
pid->integral += fp_error;
- /* limit the integral term */
+ /*
+ * We limit the integral here so that it will never
+ * get higher than 30. This prevents it from becoming
+ * too large an input over long periods of time and allows
+ * it to get factored out sooner.
+ *
+ * The value of 30 was chosen through experimentation.
+ */
integral_limit = int_tofp(30);
if (pid->integral > integral_limit)
pid->integral = integral_limit;
@@ -616,6 +623,11 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
if (limits.no_turbo || limits.turbo_disabled)
max_perf = cpu->pstate.max_pstate;
+ /*
+ * performance can be limited by the user through sysfs, by cpufreq
+ * policy, or by cpu-specific default values determined through
+ * experimentation.
+ */
max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
*max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
@@ -717,11 +729,29 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
u32 duration_us;
u32 sample_time;
+ /*
+ * core_busy is the ratio of actual performance to max
+ * max_pstate is the max non turbo pstate available
+ * current_pstate was the pstate that was requested during
+ * the last sample period.
+ *
+ * We normalize core_busy, which was our actual percent
+ * performance to what we requested during the last sample
+ * period. The result will be a percentage of busy at a
+ * specified pstate.
+ */
core_busy = cpu->sample.core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ /*
+ * Since we have a deferred timer, it will not fire unless
+ * we are in C0. So, determine if the actual elapsed time
+ * is significantly greater (3x) than our sample interval. If it
+ * is, then we were idle for a long enough period of time
+ * to adjust our busyness.
+ */
sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
duration_us = (u32) ktime_us_delta(cpu->sample.time,
cpu->last_sample_time);
@@ -948,6 +978,7 @@ static struct cpufreq_driver intel_pstate_driver = {
static int __initdata no_load;
static int __initdata no_hwp;
+static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
{
@@ -1094,7 +1125,8 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
case PSS:
return intel_pstate_no_acpi_pss();
case PPC:
- return intel_pstate_has_acpi_ppc();
+ return intel_pstate_has_acpi_ppc() &&
+ (!force_load);
}
}
@@ -1175,6 +1207,8 @@ static int __init intel_pstate_setup(char *str)
no_load = 1;
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
+ if (!strcmp(str, "force"))
+ force_load = 1;
return 0;
}
early_param("intel_pstate", intel_pstate_setup);
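
The busy-normalization comment above scales the measured busy percentage by max_pstate/current_pstate using the driver's fixed-point helpers. A stand-alone userspace sketch of the same arithmetic with 8 fractional bits (the helpers below are local stand-ins that mirror the idea of int_tofp/fp_toint/mul_fp/div_fp, not the driver's exact macros):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int64_t mul_fp(int64_t x, int64_t y) { return (x * y) >> FRAC_BITS; }
static int64_t div_fp(int64_t x, int64_t y) { return (x << FRAC_BITS) / y; }

int main(void)
{
	int64_t core_busy = int_tofp(60);	/* 60% busy at current pstate */
	int64_t max_pstate = int_tofp(24);	/* max non-turbo pstate */
	int64_t current_pstate = int_tofp(16);	/* pstate requested last sample */

	/* Normalize busyness to the max non-turbo pstate: 60 * 24/16 = 90. */
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
	printf("scaled busy: %lld%%\n", (long long)fp_toint(core_busy));
	return 0;
}
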
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c913906a719e..0f6b229afcb9 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -1,5 +1,5 @@
/*
- * (C) 2001-2004 Dave Jones. <davej@redhat.com>
+ * (C) 2001-2004 Dave Jones.
* (C) 2002 Padraig Brady. <padraig@antefacto.com>
*
* Licensed under the terms of the GNU GPL License version 2.
@@ -1008,7 +1008,7 @@ MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
module_param(enable, int, 0644);
MODULE_PARM_DESC(enable, "Enable driver");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index f91027259c3c..e6f24b281e3e 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -300,7 +300,7 @@ static void __exit powernow_k6_exit(void)
}
-MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, "
+MODULE_AUTHOR("Arjan van de Ven, Dave Jones, "
"Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index e61e224475ad..37c5742482d8 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -1,7 +1,6 @@
/*
* AMD K7 Powernow driver.
* (C) 2003 Dave Jones on behalf of SuSE Labs.
- * (C) 2003-2004 Dave Jones <davej@redhat.com>
*
* Licensed under the terms of the GNU GPL License version 2.
* Based upon datasheets & sample CPUs kindly provided by AMD.
@@ -701,7 +700,7 @@ static void __exit powernow_exit(void)
module_param(acpi_force, int, 0444);
MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 1a07b5904ed5..e56d632a8b21 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -378,8 +378,7 @@ static void __exit speedstep_exit(void)
}
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, "
- "Dominik Brodowski <linux@brodo.de>");
+MODULE_AUTHOR("Dave Jones, Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
"with ICH-M southbridges.");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index e9248bb9173a..aedec0957934 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -16,13 +16,10 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include <asm/opal.h>
#include <asm/runlatch.h>
-/* Flags and constants used in PowerNV platform */
-
#define MAX_POWERNV_IDLE_STATES 8
-#define IDLE_USE_INST_NAP 0x00010000 /* Use nap instruction */
-#define IDLE_USE_INST_SLEEP 0x00020000 /* Use sleep instruction */
struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
@@ -197,7 +194,7 @@ static int powernv_add_idle_states(void)
* target residency to be 10x exit_latency
*/
latency_ns = be32_to_cpu(idle_state_latency[i]);
- if (flags & IDLE_USE_INST_NAP) {
+ if (flags & OPAL_PM_NAP_ENABLED) {
/* Add NAP state */
strcpy(powernv_states[nr_idle_states].name, "Nap");
strcpy(powernv_states[nr_idle_states].desc, "Nap");
@@ -210,7 +207,8 @@ static int powernv_add_idle_states(void)
nr_idle_states++;
}
- if (flags & IDLE_USE_INST_SLEEP) {
+ if (flags & OPAL_PM_SLEEP_ENABLED ||
+ flags & OPAL_PM_SLEEP_ENABLED_ER1) {
/* Add FASTSLEEP state */
strcpy(powernv_states[nr_idle_states].name, "FastSleep");
strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 6753fd940c76..fe41d5ae7cb2 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -177,6 +177,10 @@ static struct attribute *lm75_attrs[] = {
};
ATTRIBUTE_GROUPS(lm75);
+static const struct thermal_zone_of_device_ops lm75_of_thermal_ops = {
+ .get_temp = lm75_read_temp,
+};
+
/*-----------------------------------------------------------------------*/
/* device probe and removal */
@@ -296,10 +300,9 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (IS_ERR(data->hwmon_dev))
return PTR_ERR(data->hwmon_dev);
- data->tz = thermal_zone_of_sensor_register(data->hwmon_dev,
- 0,
+ data->tz = thermal_zone_of_sensor_register(data->hwmon_dev, 0,
data->hwmon_dev,
- lm75_read_temp, NULL);
+ &lm75_of_thermal_ops);
if (IS_ERR(data->tz))
data->tz = NULL;
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index fd9a945fe8db..112e4d45e4a0 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -486,6 +486,10 @@ static const struct attribute_group ntc_attr_group = {
.attrs = ntc_attributes,
};
+static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = {
+ .get_temp = ntc_read_temp,
+};
+
static int ntc_thermistor_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -579,7 +583,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
pdev_id->name);
data->tz = thermal_zone_of_sensor_register(data->dev, 0, data->dev,
- ntc_read_temp, NULL);
+ &ntc_of_thermal_ops);
if (IS_ERR(data->tz)) {
dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n");
data->tz = NULL;
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 51719956cc03..ba9f478f64ee 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -158,6 +158,10 @@ ATTRIBUTE_GROUPS(tmp102);
#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
+static const struct thermal_zone_of_device_ops tmp102_of_thermal_ops = {
+ .get_temp = tmp102_read_temp,
+};
+
static int tmp102_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -215,7 +219,7 @@ static int tmp102_probe(struct i2c_client *client,
}
tmp102->hwmon_dev = hwmon_dev;
tmp102->tz = thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
- tmp102_read_temp, NULL);
+ &tmp102_of_thermal_ops);
if (IS_ERR(tmp102->tz))
tmp102->tz = NULL;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f08dd20f625c..31e8308ba899 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1073,4 +1073,15 @@ config SCx200_ACB
This support is also available as a module. If so, the module
will be called scx200_acb.
+config I2C_OPAL
+ tristate "IBM OPAL I2C driver"
+ depends on PPC_POWERNV
+ default y
+ help
+ This exposes the PowerNV platform i2c busses to the Linux i2c layer;
+ the driver is based on the OPAL interfaces.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-opal.
+
endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 5e6c8223719e..56388f658d2f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
obj-$(CONFIG_I2C_BCM_KONA) += i2c-bcm-kona.o
obj-$(CONFIG_I2C_CROS_EC_TUNNEL) += i2c-cros-ec-tunnel.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
+obj-$(CONFIG_I2C_OPAL) += i2c-opal.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
new file mode 100644
index 000000000000..16f90b1a7508
--- /dev/null
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -0,0 +1,294 @@
+/*
+ * IBM OPAL I2C driver
+ * Copyright (C) 2014 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/firmware.h>
+#include <asm/opal.h>
+
+static int i2c_opal_translate_error(int rc)
+{
+ switch (rc) {
+ case OPAL_NO_MEM:
+ return -ENOMEM;
+ case OPAL_PARAMETER:
+ return -EINVAL;
+ case OPAL_I2C_ARBT_LOST:
+ return -EAGAIN;
+ case OPAL_I2C_TIMEOUT:
+ return -ETIMEDOUT;
+ case OPAL_I2C_NACK_RCVD:
+ return -ENXIO;
+ case OPAL_I2C_STOP_ERR:
+ return -EBUSY;
+ default:
+ return -EIO;
+ }
+}
+
+static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req)
+{
+ struct opal_msg msg;
+ int token, rc;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_err("Failed to get the async token\n");
+
+ return token;
+ }
+
+ rc = opal_i2c_request(token, bus_id, req);
+ if (rc != OPAL_ASYNC_COMPLETION) {
+ rc = i2c_opal_translate_error(rc);
+ goto exit;
+ }
+
+ rc = opal_async_wait_response(token, &msg);
+ if (rc)
+ goto exit;
+
+ rc = be64_to_cpu(msg.params[1]);
+ if (rc != OPAL_SUCCESS) {
+ rc = i2c_opal_translate_error(rc);
+ goto exit;
+ }
+
+exit:
+ opal_async_release_token(token);
+ return rc;
+}
+
+static int i2c_opal_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ unsigned long opal_id = (unsigned long)adap->algo_data;
+ struct opal_i2c_request req;
+ int rc, i;
+
+ /* We only support fairly simple combinations here of one
+ * or two messages
+ */
+ memset(&req, 0, sizeof(req));
+ switch (num) {
+ case 0:
+ return 0;
+ case 1:
+ req.type = (msgs[0].flags & I2C_M_RD) ?
+ OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE;
+ req.addr = cpu_to_be16(msgs[0].addr);
+ req.size = cpu_to_be32(msgs[0].len);
+ req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf));
+ break;
+ case 2:
+ /* For two messages, we basically support only simple
+ * smbus transactions of a write plus a read. We might
+ * want to allow also two writes but we'd have to bounce
+ * the data into a single buffer.
+ */
+ if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD))
+ return -EOPNOTSUPP;
+ if (msgs[0].len > 4)
+ return -EOPNOTSUPP;
+ if (msgs[0].addr != msgs[1].addr)
+ return -EOPNOTSUPP;
+ req.type = OPAL_I2C_SM_READ;
+ req.addr = cpu_to_be16(msgs[0].addr);
+ req.subaddr_sz = msgs[0].len;
+ for (i = 0; i < msgs[0].len; i++)
+ req.subaddr = (req.subaddr << 8) | msgs[0].buf[i];
+ req.subaddr = cpu_to_be32(req.subaddr);
+ req.size = cpu_to_be32(msgs[1].len);
+ req.buffer_ra = cpu_to_be64(__pa(msgs[1].buf));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rc = i2c_opal_send_request(opal_id, &req);
+ if (rc)
+ return rc;
+
+ return num;
+}
+
+static int i2c_opal_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data)
+{
+ unsigned long opal_id = (unsigned long)adap->algo_data;
+ struct opal_i2c_request req;
+ u8 local[2];
+ int rc;
+
+ memset(&req, 0, sizeof(req));
+
+ req.addr = cpu_to_be16(addr);
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ req.buffer_ra = cpu_to_be64(__pa(&data->byte));
+ req.size = cpu_to_be32(1);
+ /* Fall through */
+ case I2C_SMBUS_QUICK:
+ req.type = (read_write == I2C_SMBUS_READ) ?
+ OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ req.buffer_ra = cpu_to_be64(__pa(&data->byte));
+ req.size = cpu_to_be32(1);
+ req.subaddr = cpu_to_be32(command);
+ req.subaddr_sz = 1;
+ req.type = (read_write == I2C_SMBUS_READ) ?
+ OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (!read_write) {
+ local[0] = data->word & 0xff;
+ local[1] = (data->word >> 8) & 0xff;
+ }
+ req.buffer_ra = cpu_to_be64(__pa(local));
+ req.size = cpu_to_be32(2);
+ req.subaddr = cpu_to_be32(command);
+ req.subaddr_sz = 1;
+ req.type = (read_write == I2C_SMBUS_READ) ?
+ OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ req.buffer_ra = cpu_to_be64(__pa(&data->block[1]));
+ req.size = cpu_to_be32(data->block[0]);
+ req.subaddr = cpu_to_be32(command);
+ req.subaddr_sz = 1;
+ req.type = (read_write == I2C_SMBUS_READ) ?
+ OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = i2c_opal_send_request(opal_id, &req);
+ if (!rc && read_write && size == I2C_SMBUS_WORD_DATA) {
+ data->word = ((u16)local[1]) << 8;
+ data->word |= local[0];
+ }
+
+ return rc;
+}
+
+static u32 i2c_opal_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
+}
+
+static const struct i2c_algorithm i2c_opal_algo = {
+ .master_xfer = i2c_opal_master_xfer,
+ .smbus_xfer = i2c_opal_smbus_xfer,
+ .functionality = i2c_opal_func,
+};
+
+static int i2c_opal_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter;
+ const char *pname;
+ u32 opal_id;
+ int rc;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ rc = of_property_read_u32(pdev->dev.of_node, "ibm,opal-id", &opal_id);
+ if (rc) {
+ dev_err(&pdev->dev, "Missing ibm,opal-id property !\n");
+ return -EIO;
+ }
+
+ adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return -ENOMEM;
+
+ adapter->algo = &i2c_opal_algo;
+ adapter->algo_data = (void *)(unsigned long)opal_id;
+ adapter->dev.parent = &pdev->dev;
+ adapter->dev.of_node = of_node_get(pdev->dev.of_node);
+ pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL);
+ if (pname)
+ strlcpy(adapter->name, pname, sizeof(adapter->name));
+ else
+ strlcpy(adapter->name, "opal", sizeof(adapter->name));
+
+ platform_set_drvdata(pdev, adapter);
+ rc = i2c_add_adapter(adapter);
+ if (rc)
+ dev_err(&pdev->dev, "Failed to register the i2c adapter\n");
+
+ return rc;
+}
+
+static int i2c_opal_remove(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(adapter);
+
+ return 0;
+}
+
+static const struct of_device_id i2c_opal_of_match[] = {
+ {
+ .compatible = "ibm,opal-i2c",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, i2c_opal_of_match);
+
+static struct platform_driver i2c_opal_driver = {
+ .probe = i2c_opal_probe,
+ .remove = i2c_opal_remove,
+ .driver = {
+ .name = "i2c-opal",
+ .of_match_table = i2c_opal_of_match,
+ },
+};
+
+static int __init i2c_opal_init(void)
+{
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ return platform_driver_register(&i2c_opal_driver);
+}
+module_init(i2c_opal_init);
+
+static void __exit i2c_opal_exit(void)
+{
+ return platform_driver_unregister(&i2c_opal_driver);
+}
+module_exit(i2c_opal_exit);
+
+MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM OPAL I2C driver");
+MODULE_LICENSE("GPL");
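
For context, the one-write-plus-one-read pair that i2c_opal_master_xfer() maps to OPAL_I2C_SM_READ is the standard register-read idiom a client driver issues through the I2C core. A hedged sketch using the generic i2c_transfer() API (the 0x50 device address and 0x10 register are made up):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/kernel.h>

static int demo_read_reg(struct i2c_adapter *adapter, u8 *val)
{
	u8 reg = 0x10;	/* hypothetical register (the subaddress) */
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	/* i2c_transfer() returns the number of messages transferred. */
	return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}
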
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 77089399359b..b899531498eb 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -38,6 +38,17 @@ config INFINIBAND_USER_MEM
depends on INFINIBAND_USER_ACCESS != n
default y
+config INFINIBAND_ON_DEMAND_PAGING
+ bool "InfiniBand on-demand paging support"
+ depends on INFINIBAND_USER_MEM
+ select MMU_NOTIFIER
+ default y
+ ---help---
+ On demand paging support for the InfiniBand subsystem.
+ Together with driver support this allows registration of
+ memory regions without pinning their pages, fetching the
+ pages on demand instead.
+
config INFINIBAND_ADDR_TRANS
bool
depends on INFINIBAND
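
The new INFINIBAND_ON_DEMAND_PAGING option lets memory regions be registered without pinning their pages. From userspace, the feature is typically requested through libibverbs with the IBV_ACCESS_ON_DEMAND access flag; a brief sketch (assuming a device and driver that advertise ODP support, error handling left to the caller):

#include <infiniband/verbs.h>

struct ibv_mr *demo_reg_odp_mr(struct ibv_pd *pd, void *addr, size_t len)
{
	/* No pages are pinned up front; faults are served on demand. */
	return ibv_reg_mr(pd, addr, len,
			  IBV_ACCESS_LOCAL_WRITE |
			  IBV_ACCESS_REMOTE_READ |
			  IBV_ACCESS_ON_DEMAND);
}
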
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index ffd0af6734af..acf736764445 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
+ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 8172d37f9add..f80da50d84a5 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -176,8 +176,8 @@ static void set_timeout(unsigned long time)
unsigned long delay;
delay = time - jiffies;
- if ((long)delay <= 0)
- delay = 1;
+ if ((long)delay < 0)
+ delay = 0;
mod_delayed_work(addr_wq, &work, delay);
}
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index d2360a8ef0b2..fa17b552ff78 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -525,17 +525,22 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
if (status)
process_join_error(group, status);
else {
+ int mgids_changed, is_mgid0;
ib_find_pkey(group->port->dev->device, group->port->port_num,
be16_to_cpu(rec->pkey), &pkey_index);
spin_lock_irq(&group->port->lock);
- group->rec = *rec;
if (group->state == MCAST_BUSY &&
group->pkey_index == MCAST_INVALID_PKEY_INDEX)
group->pkey_index = pkey_index;
- if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
+ mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
+ sizeof(group->rec.mgid));
+ group->rec = *rec;
+ if (mgids_changed) {
rb_erase(&group->node, &group->port->table);
- mcast_insert(group->port, group, 1);
+ is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
+ sizeof(mgid0));
+ mcast_insert(group->port, group, is_mgid0);
}
spin_unlock_irq(&group->port->lock);
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index df0c4f605a21..aec7a6aa2951 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -39,6 +39,7 @@
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/slab.h>
+#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
@@ -69,6 +70,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
/**
* ib_umem_get - Pin and DMA map userspace memory.
+ *
+ * If access flags indicate ODP memory, avoid pinning. Instead, store
+ * the mm for future page fault handling, in conjunction with MMU notifiers.
+ *
* @context: userspace context to pin memory for
* @addr: userspace virtual address to start at
* @size: length of region to pin
@@ -103,17 +108,30 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->context = context;
umem->length = size;
- umem->offset = addr & ~PAGE_MASK;
+ umem->address = addr;
umem->page_size = PAGE_SIZE;
umem->pid = get_task_pid(current, PIDTYPE_PID);
/*
- * We ask for writable memory if any access flags other than
- * "remote read" are set. "Local write" and "remote write"
+ * We ask for writable memory if any of the following
+ * access flags are set. "Local write" and "remote write"
* obviously require write access. "Remote atomic" can do
* things like fetch and add, which will modify memory, and
* "MW bind" can change permissions by binding a window.
*/
- umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
+ umem->writable = !!(access &
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+
+ if (access & IB_ACCESS_ON_DEMAND) {
+ ret = ib_umem_odp_get(context, umem);
+ if (ret) {
+ kfree(umem);
+ return ERR_PTR(ret);
+ }
+ return umem;
+ }
+
+ umem->odp_data = NULL;
/* We assume the memory is from hugetlb until proved otherwise */
umem->hugetlb = 1;
@@ -132,7 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (!vma_list)
umem->hugetlb = 0;
- npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
+ npages = ib_umem_num_pages(umem);
down_write(&current->mm->mmap_sem);
@@ -235,6 +253,11 @@ void ib_umem_release(struct ib_umem *umem)
struct task_struct *task;
unsigned long diff;
+ if (umem->odp_data) {
+ ib_umem_odp_release(umem);
+ return;
+ }
+
__ib_umem_release(umem->context->device, umem, 1);
task = get_pid_task(umem->pid, PIDTYPE_PID);
@@ -246,7 +269,7 @@ void ib_umem_release(struct ib_umem *umem)
if (!mm)
goto out;
- diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+ diff = ib_umem_num_pages(umem);
/*
* We may be called with the mm's mmap_sem already held. This
@@ -283,6 +306,9 @@ int ib_umem_page_count(struct ib_umem *umem)
int n;
struct scatterlist *sg;
+ if (umem->odp_data)
+ return ib_umem_num_pages(umem);
+
shift = ilog2(umem->page_size);
n = 0;
@@ -292,3 +318,37 @@ int ib_umem_page_count(struct ib_umem *umem)
return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
+
+/*
+ * Copy from the given ib_umem's pages to the given buffer.
+ *
+ * dst - destination buffer
+ * umem - the umem to copy from
+ * offset - offset in the umem to start copying from
+ * length - buffer length
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length)
+{
+ size_t end = offset + length;
+ int ret;
+
+ if (offset > umem->length || length > umem->length - offset) {
+ pr_err("ib_umem_copy_from not in range. offset: %zu umem length: %zu end: %zu\n",
+ offset, umem->length, end);
+ return -EINVAL;
+ }
+
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ offset + ib_umem_offset(umem));
+
+ if (ret < 0)
+ return ret;
+ else if (ret != length)
+ return -EINVAL;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(ib_umem_copy_from);
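As a usage illustration for the new helper, a hedged in-kernel sketch; the wrapper name and the idea of peeking at a header are assumptions, not part of this patch:

/* Copy the first bytes of a registered region, e.g. to inspect a
 * header that the HCA will later DMA into. */
static int peek_umem_header(struct ib_umem *umem, void *hdr, size_t hdr_len)
{
	/* Fails with -EINVAL if [0, hdr_len) is outside the umem. */
	return ib_umem_copy_from(hdr, umem, 0, hdr_len);
}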
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
new file mode 100644
index 000000000000..6095872549e7
--- /dev/null
+++ b/drivers/infiniband/core/umem_odp.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pid.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+
+static void ib_umem_notifier_start_account(struct ib_umem *item)
+{
+ mutex_lock(&item->odp_data->umem_mutex);
+
+ /* Only update private counters for this umem if it has them.
+ * Otherwise skip it. All page faults will be delayed for this umem. */
+ if (item->odp_data->mn_counters_active) {
+ int notifiers_count = item->odp_data->notifiers_count++;
+
+ if (notifiers_count == 0)
+ /* Initialize the completion object for waiting on
+ * notifiers. Since notifiers_count is zero, no one
+ * should be waiting right now. */
+ reinit_completion(&item->odp_data->notifier_completion);
+ }
+ mutex_unlock(&item->odp_data->umem_mutex);
+}
+
+static void ib_umem_notifier_end_account(struct ib_umem *item)
+{
+ mutex_lock(&item->odp_data->umem_mutex);
+
+ /* Only update private counters for this umem if it has them.
+ * Otherwise skip it. All page faults will be delayed for this umem. */
+ if (item->odp_data->mn_counters_active) {
+ /*
+ * This sequence increase will notify the QP page fault
+ * handler that the page that is going to be mapped in the
+ * spte could have been freed.
+ */
+ ++item->odp_data->notifiers_seq;
+ if (--item->odp_data->notifiers_count == 0)
+ complete_all(&item->odp_data->notifier_completion);
+ }
+ mutex_unlock(&item->odp_data->umem_mutex);
+}
+
+/* Account for a new mmu notifier in an ib_ucontext. */
+static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
+{
+ atomic_inc(&context->notifier_count);
+}
+
+/* Account for a terminating mmu notifier in an ib_ucontext.
+ *
+ * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
+ * the function takes the semaphore itself. */
+static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
+{
+ int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
+
+ if (zero_notifiers &&
+ !list_empty(&context->no_private_counters)) {
+ /* No currently running mmu notifiers. Now is the chance to
+ * add private accounting to all previously added umems. */
+ struct ib_umem_odp *odp_data, *next;
+
+ /* Prevent concurrent mmu notifiers from working on the
+ * no_private_counters list. */
+ down_write(&context->umem_rwsem);
+
+ /* Read the notifier_count again, with the umem_rwsem
+ * semaphore taken for write. */
+ if (!atomic_read(&context->notifier_count)) {
+ list_for_each_entry_safe(odp_data, next,
+ &context->no_private_counters,
+ no_private_counters) {
+ mutex_lock(&odp_data->umem_mutex);
+ odp_data->mn_counters_active = true;
+ list_del(&odp_data->no_private_counters);
+ complete_all(&odp_data->notifier_completion);
+ mutex_unlock(&odp_data->umem_mutex);
+ }
+ }
+
+ up_write(&context->umem_rwsem);
+ }
+}
+
+static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ /*
+ * Increase the number of notifiers running, to
+ * prevent any further fault handling on this MR.
+ */
+ ib_umem_notifier_start_account(item);
+ item->odp_data->dying = 1;
+ /* Make sure the fact that the umem is dying is visible before we
+ * release all pending page faults. */
+ smp_wmb();
+ complete_all(&item->odp_data->notifier_completion);
+ item->context->invalidate_range(item, ib_umem_start(item),
+ ib_umem_end(item));
+ return 0;
+}
+
+static void ib_umem_notifier_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
+ ULLONG_MAX,
+ ib_umem_notifier_release_trampoline,
+ NULL);
+ up_read(&context->umem_rwsem);
+}
+
+static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_start_account(item);
+ item->context->invalidate_range(item, start, start + PAGE_SIZE);
+ ib_umem_notifier_end_account(item);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
+ address + PAGE_SIZE,
+ invalidate_page_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+ ib_ucontext_notifier_end_account(context);
+}
+
+static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_start_account(item);
+ item->context->invalidate_range(item, start, end);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ end,
+ invalidate_range_start_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+}
+
+static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_end_account(item);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ end,
+ invalidate_range_end_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+ ib_ucontext_notifier_end_account(context);
+}
+
+static struct mmu_notifier_ops ib_umem_notifiers = {
+ .release = ib_umem_notifier_release,
+ .invalidate_page = ib_umem_notifier_invalidate_page,
+ .invalidate_range_start = ib_umem_notifier_invalidate_range_start,
+ .invalidate_range_end = ib_umem_notifier_invalidate_range_end,
+};
+
+int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
+{
+ int ret_val;
+ struct pid *our_pid;
+ struct mm_struct *mm = get_task_mm(current);
+
+ if (!mm)
+ return -EINVAL;
+
+ /* Prevent creating ODP MRs in child processes */
+ rcu_read_lock();
+ our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
+ put_pid(our_pid);
+ if (context->tgid != our_pid) {
+ ret_val = -EINVAL;
+ goto out_mm;
+ }
+
+ umem->hugetlb = 0;
+ umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
+ if (!umem->odp_data) {
+ ret_val = -ENOMEM;
+ goto out_mm;
+ }
+ umem->odp_data->umem = umem;
+
+ mutex_init(&umem->odp_data->umem_mutex);
+
+ init_completion(&umem->odp_data->notifier_completion);
+
+ umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
+ sizeof(*umem->odp_data->page_list));
+ if (!umem->odp_data->page_list) {
+ ret_val = -ENOMEM;
+ goto out_odp_data;
+ }
+
+ umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
+ sizeof(*umem->odp_data->dma_list));
+ if (!umem->odp_data->dma_list) {
+ ret_val = -ENOMEM;
+ goto out_page_list;
+ }
+
+ /*
+ * When using MMU notifiers, we will get a
+ * notification before the "current" task (and MM) is
+ * destroyed. We use the umem_rwsem semaphore to synchronize.
+ */
+ down_write(&context->umem_rwsem);
+ context->odp_mrs_count++;
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_insert(&umem->odp_data->interval_tree,
+ &context->umem_tree);
+ if (likely(!atomic_read(&context->notifier_count)))
+ umem->odp_data->mn_counters_active = true;
+ else
+ list_add(&umem->odp_data->no_private_counters,
+ &context->no_private_counters);
+ downgrade_write(&context->umem_rwsem);
+
+ if (context->odp_mrs_count == 1) {
+ /*
+ * Note that at this point, no MMU notifier is running
+ * for this context!
+ */
+ atomic_set(&context->notifier_count, 0);
+ INIT_HLIST_NODE(&context->mn.hlist);
+ context->mn.ops = &ib_umem_notifiers;
+ /*
+ * Lockdep detects a false positive for mmap_sem vs.
+ * umem_rwsem because it does not model downgrade_write correctly.
+ */
+ lockdep_off();
+ ret_val = mmu_notifier_register(&context->mn, mm);
+ lockdep_on();
+ if (ret_val) {
+ pr_err("Failed to register mmu_notifier %d\n", ret_val);
+ ret_val = -EBUSY;
+ goto out_mutex;
+ }
+ }
+
+ up_read(&context->umem_rwsem);
+
+ /*
+ * Note that doing an mmput can trigger a notifier for the relevant mm.
+ * If the notifier is called while we hold the umem_rwsem, this will
+ * cause a deadlock. Therefore, we release the reference only after
+ * releasing the semaphore.
+ */
+ mmput(mm);
+ return 0;
+
+out_mutex:
+ up_read(&context->umem_rwsem);
+ vfree(umem->odp_data->dma_list);
+out_page_list:
+ vfree(umem->odp_data->page_list);
+out_odp_data:
+ kfree(umem->odp_data);
+out_mm:
+ mmput(mm);
+ return ret_val;
+}
+
+void ib_umem_odp_release(struct ib_umem *umem)
+{
+ struct ib_ucontext *context = umem->context;
+
+ /*
+ * Ensure that no more pages are mapped in the umem.
+ *
+ * It is the driver's responsibility to ensure, before calling us,
+ * that the hardware will not attempt to access the MR any more.
+ */
+ ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
+ ib_umem_end(umem));
+
+ down_write(&context->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_remove(&umem->odp_data->interval_tree,
+ &context->umem_tree);
+ context->odp_mrs_count--;
+ if (!umem->odp_data->mn_counters_active) {
+ list_del(&umem->odp_data->no_private_counters);
+ complete_all(&umem->odp_data->notifier_completion);
+ }
+
+ /*
+ * Downgrade the lock to a read lock. This ensures that the notifiers
+ * (which take the semaphore for reading) will be able to finish, and we
+ * will eventually be able to obtain the mmu notifiers SRCU. Note
+ * that since we are doing it atomically, no other user could register
+ * and unregister while we do the check.
+ */
+ downgrade_write(&context->umem_rwsem);
+ if (!context->odp_mrs_count) {
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(context->tgid,
+ PIDTYPE_PID);
+ if (owning_process == NULL)
+ /*
+ * The process is already dead; the notifiers were
+ * already removed.
+ */
+ goto out;
+
+ owning_mm = get_task_mm(owning_process);
+ if (owning_mm == NULL)
+ /*
+ * The process's mm is already dead; the notifiers
+ * were already removed.
+ */
+ goto out_put_task;
+ mmu_notifier_unregister(&context->mn, owning_mm);
+
+ mmput(owning_mm);
+
+out_put_task:
+ put_task_struct(owning_process);
+ }
+out:
+ up_read(&context->umem_rwsem);
+
+ vfree(umem->odp_data->dma_list);
+ vfree(umem->odp_data->page_list);
+ kfree(umem->odp_data);
+ kfree(umem);
+}
+
+/*
+ * Map for DMA and insert a single page into the on-demand paging page tables.
+ *
+ * @umem: the umem to insert the page into.
+ * @page_index: index in the umem to add the page to.
+ * @page: the page struct to map and add.
+ * @access_mask: access permissions needed for this page.
+ * @current_seq: sequence number for synchronization with invalidations.
+ * the sequence number is taken from
+ * umem->odp_data->notifiers_seq.
+ *
+ * The function returns -EFAULT if the DMA mapping operation fails. It returns
+ * -EAGAIN if a concurrent invalidation prevents us from updating the page.
+ *
+ * The page is released via put_page even if the operation failed. For
+ * on-demand pinning, the page is released whenever it isn't stored in the
+ * umem.
+ */
+static int ib_umem_odp_map_dma_single_page(
+ struct ib_umem *umem,
+ int page_index,
+ u64 base_virt_addr,
+ struct page *page,
+ u64 access_mask,
+ unsigned long current_seq)
+{
+ struct ib_device *dev = umem->context->device;
+ dma_addr_t dma_addr;
+ int stored_page = 0;
+ int remove_existing_mapping = 0;
+ int ret = 0;
+
+ mutex_lock(&umem->odp_data->umem_mutex);
+ /*
+ * Note: we avoid writing if seq is different from the initial seq, to
+ * handle the case of a racing notifier. This check also allows us to bail
+ * early if we have a notifier running in parallel with us.
+ */
+ if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (!(umem->odp_data->dma_list[page_index])) {
+ dma_addr = ib_dma_map_page(dev,
+ page,
+ 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(dev, dma_addr)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
+ umem->odp_data->page_list[page_index] = page;
+ stored_page = 1;
+ } else if (umem->odp_data->page_list[page_index] == page) {
+ umem->odp_data->dma_list[page_index] |= access_mask;
+ } else {
+ pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
+ umem->odp_data->page_list[page_index], page);
+ /* Better remove the mapping now, to prevent any further
+ * damage. */
+ remove_existing_mapping = 1;
+ }
+
+out:
+ mutex_unlock(&umem->odp_data->umem_mutex);
+
+ /* On Demand Paging - avoid pinning the page */
+ if (umem->context->invalidate_range || !stored_page)
+ put_page(page);
+
+ if (remove_existing_mapping && umem->context->invalidate_range) {
+ invalidate_page_trampoline(
+ umem,
+ base_virt_addr + (page_index * PAGE_SIZE),
+ base_virt_addr + ((page_index+1)*PAGE_SIZE),
+ NULL);
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+/**
+ * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
+ *
+ * Pins the range of pages passed in the argument, and maps them to
+ * DMA addresses. The DMA addresses of the mapped pages are updated in
+ * umem->odp_data->dma_list.
+ *
+ * Returns the number of pages mapped on success, or a negative error code
+ * on failure.
+ * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
+ * the function from completing its task.
+ *
+ * @umem: the umem to map and pin
+ * @user_virt: the address from which we need to map.
+ * @bcnt: the minimal number of bytes to pin and map. The mapping might be
+ * bigger due to alignment, and may also be smaller in case of an error
+ * pinning or mapping a page. The actual number of pages mapped is
+ * given by the return value.
+ * @access_mask: bit mask of the requested access permissions for the given
+ * range.
+ * @current_seq: the MMU notifiers sequence value for synchronization with
+ * invalidations. The sequence number is read from
+ * umem->odp_data->notifiers_seq before calling this function
+ */
+int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
+ u64 access_mask, unsigned long current_seq)
+{
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+ struct page **local_page_list = NULL;
+ u64 off;
+ int j, k, ret = 0, start_idx, npages = 0;
+ u64 base_virt_addr;
+
+ if (access_mask == 0)
+ return -EINVAL;
+
+ if (user_virt < ib_umem_start(umem) ||
+ user_virt + bcnt > ib_umem_end(umem))
+ return -EFAULT;
+
+ local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
+ if (!local_page_list)
+ return -ENOMEM;
+
+ off = user_virt & (~PAGE_MASK);
+ user_virt = user_virt & PAGE_MASK;
+ base_virt_addr = user_virt;
+ bcnt += off; /* Charge for the first page offset as well. */
+
+ owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
+ if (owning_process == NULL) {
+ ret = -EINVAL;
+ goto out_no_task;
+ }
+
+ owning_mm = get_task_mm(owning_process);
+ if (owning_mm == NULL) {
+ ret = -EINVAL;
+ goto out_put_task;
+ }
+
+ start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
+ k = start_idx;
+
+ while (bcnt > 0) {
+ const size_t gup_num_pages =
+ min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
+ PAGE_SIZE / sizeof(struct page *));
+
+ down_read(&owning_mm->mmap_sem);
+ /*
+ * Note: this might result in redundant page getting. We can
+ * avoid this by checking that dma_list is 0 before calling
+ * get_user_pages. However, this makes the code much more
+ * complex (and doesn't gain us much performance in most use
+ * cases).
+ */
+ npages = get_user_pages(owning_process, owning_mm, user_virt,
+ gup_num_pages,
+ access_mask & ODP_WRITE_ALLOWED_BIT, 0,
+ local_page_list, NULL);
+ up_read(&owning_mm->mmap_sem);
+
+ if (npages < 0)
+ break;
+
+ bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
+ user_virt += npages << PAGE_SHIFT;
+ for (j = 0; j < npages; ++j) {
+ ret = ib_umem_odp_map_dma_single_page(
+ umem, k, base_virt_addr, local_page_list[j],
+ access_mask, current_seq);
+ if (ret < 0)
+ break;
+ k++;
+ }
+
+ if (ret < 0) {
+ /* Release left over pages when handling errors. */
+ for (++j; j < npages; ++j)
+ put_page(local_page_list[j]);
+ break;
+ }
+ }
+
+ if (ret >= 0) {
+ if (npages < 0 && k == start_idx)
+ ret = npages;
+ else
+ ret = k - start_idx;
+ }
+
+ mmput(owning_mm);
+out_put_task:
+ put_task_struct(owning_process);
+out_no_task:
+ free_page((unsigned long)local_page_list);
+ return ret;
+}
+EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
+
+void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
+ u64 bound)
+{
+ int idx;
+ u64 addr;
+ struct ib_device *dev = umem->context->device;
+
+ virt = max_t(u64, virt, ib_umem_start(umem));
+ bound = min_t(u64, bound, ib_umem_end(umem));
+ /* Note that during the run of this function, the
+ * notifiers_count of the MR is > 0, preventing any racing
+ * faults from completing. We might be racing with other
+ * invalidations, so we must make sure we free each page only
+ * once. */
+ for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
+ idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
+ mutex_lock(&umem->odp_data->umem_mutex);
+ if (umem->odp_data->page_list[idx]) {
+ struct page *page = umem->odp_data->page_list[idx];
+ struct page *head_page = compound_head(page);
+ dma_addr_t dma = umem->odp_data->dma_list[idx];
+ dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
+
+ WARN_ON(!dma_addr);
+
+ ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma & ODP_WRITE_ALLOWED_BIT)
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
+ /* on demand pinning support */
+ if (!umem->context->invalidate_range)
+ put_page(page);
+ umem->odp_data->page_list[idx] = NULL;
+ umem->odp_data->dma_list[idx] = 0;
+ }
+ mutex_unlock(&umem->odp_data->umem_mutex);
+ }
+}
+EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
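The notifiers_seq/notifiers_count pair above implements a seqcount-like retry protocol for consumers of ib_umem_odp_map_dma_pages(). A hedged sketch of the expected driver-side loop; the function name is illustrative, and the real consumer lands in mlx5's page-fault handler later in this series:

/* Fault in a range, retrying if an invalidation raced with us. */
#include <rdma/ib_umem_odp.h>

static int fault_in_range_sketch(struct ib_umem *umem, u64 va, u64 len,
				 u64 access_mask)
{
	unsigned long seq;
	int npages;

	do {
		/* Sample the sequence count before doing the gup/map work. */
		seq = umem->odp_data->notifiers_seq;
		smp_rmb();
		/* Returns -EAGAIN when ib_umem_mmu_notifier_retry() (called
		 * inside the helper) sees that the count or sequence moved. */
		npages = ib_umem_odp_map_dma_pages(umem, va, len,
						   access_mask, seq);
	} while (npages == -EAGAIN);

	return npages;
}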
diff --git a/drivers/infiniband/core/umem_rbtree.c b/drivers/infiniband/core/umem_rbtree.c
new file mode 100644
index 000000000000..727d788448f5
--- /dev/null
+++ b/drivers/infiniband/core/umem_rbtree.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <rdma/ib_umem_odp.h>
+
+/*
+ * The ib_umem list keeps track of memory regions for which the HW
+ * device requested to receive notifications when the related memory
+ * mapping is changed.
+ *
+ * ib_umem_lock protects the list.
+ */
+
+static inline u64 node_start(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_start(umem_odp->umem);
+}
+
+/* Note that the representation of the intervals in the interval tree
+ * considers the ending point as contained in the interval, while the
+ * function ib_umem_end returns the first address which is not contained
+ * in the umem.
+ */
+static inline u64 node_last(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_end(umem_odp->umem) - 1;
+}
+
+INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
+ node_start, node_last, , rbt_ib_umem)
+
+/* @last is not a part of the interval. See comment for function
+ * node_last.
+ */
+int rbt_ib_umem_for_each_in_range(struct rb_root *root,
+ u64 start, u64 last,
+ umem_call_back cb,
+ void *cookie)
+{
+ int ret_val = 0;
+ struct umem_odp_node *node;
+ struct ib_umem_odp *umem;
+
+ if (unlikely(start == last))
+ return ret_val;
+
+ for (node = rbt_ib_umem_iter_first(root, start, last - 1); node;
+ node = rbt_ib_umem_iter_next(node, start, last - 1)) {
+ umem = container_of(node, struct ib_umem_odp, interval_tree);
+ ret_val = cb(umem->umem, start, last, cookie) || ret_val;
+ }
+
+ return ret_val;
+}
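A hedged sketch of how a caller drives the iterator above; the counting callback and wrapper are purely illustrative. As the comment on node_last() notes, the tree stores closed intervals while callers pass half-open ranges:

/* Count the ODP umems overlapping [start, last) in a context.
 * Caller must hold context->umem_rwsem (read or write). */
static int count_one(struct ib_umem *item, u64 start, u64 last, void *cookie)
{
	(*(int *)cookie)++;
	return 0;
}

static int count_umems_in_range(struct ib_ucontext *context,
				u64 start, u64 last)
{
	int n = 0;

	rbt_ib_umem_for_each_in_range(&context->umem_tree, start, last,
				      count_one, &n);
	return n;
}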
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 643c08a025a5..b716b0815644 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
IB_UVERBS_DECLARE_EX_CMD(create_flow);
IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5ba2a86aab6a..532d8eba8b02 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -36,6 +36,7 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
@@ -288,6 +289,9 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
struct ib_uverbs_get_context_resp resp;
struct ib_udata udata;
struct ib_device *ibdev = file->device->ib_dev;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct ib_device_attr dev_attr;
+#endif
struct ib_ucontext *ucontext;
struct file *filp;
int ret;
@@ -325,8 +329,25 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&ucontext->ah_list);
INIT_LIST_HEAD(&ucontext->xrcd_list);
INIT_LIST_HEAD(&ucontext->rule_list);
+ rcu_read_lock();
+ ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
ucontext->closing = 0;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ ucontext->umem_tree = RB_ROOT;
+ init_rwsem(&ucontext->umem_rwsem);
+ ucontext->odp_mrs_count = 0;
+ INIT_LIST_HEAD(&ucontext->no_private_counters);
+
+ ret = ib_query_device(ibdev, &dev_attr);
+ if (ret)
+ goto err_free;
+ if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+ ucontext->invalidate_range = NULL;
+
+#endif
+
resp.num_comp_vectors = file->device->num_comp_vectors;
ret = get_unused_fd_flags(O_CLOEXEC);
@@ -371,6 +392,7 @@ err_fd:
put_unused_fd(resp.async_fd);
err_free:
+ put_pid(ucontext->tgid);
ibdev->dealloc_ucontext(ucontext);
err:
@@ -378,6 +400,52 @@ err:
return ret;
}
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+ struct ib_uverbs_query_device_resp *resp,
+ struct ib_device_attr *attr)
+{
+ resp->fw_ver = attr->fw_ver;
+ resp->node_guid = file->device->ib_dev->node_guid;
+ resp->sys_image_guid = attr->sys_image_guid;
+ resp->max_mr_size = attr->max_mr_size;
+ resp->page_size_cap = attr->page_size_cap;
+ resp->vendor_id = attr->vendor_id;
+ resp->vendor_part_id = attr->vendor_part_id;
+ resp->hw_ver = attr->hw_ver;
+ resp->max_qp = attr->max_qp;
+ resp->max_qp_wr = attr->max_qp_wr;
+ resp->device_cap_flags = attr->device_cap_flags;
+ resp->max_sge = attr->max_sge;
+ resp->max_sge_rd = attr->max_sge_rd;
+ resp->max_cq = attr->max_cq;
+ resp->max_cqe = attr->max_cqe;
+ resp->max_mr = attr->max_mr;
+ resp->max_pd = attr->max_pd;
+ resp->max_qp_rd_atom = attr->max_qp_rd_atom;
+ resp->max_ee_rd_atom = attr->max_ee_rd_atom;
+ resp->max_res_rd_atom = attr->max_res_rd_atom;
+ resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
+ resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
+ resp->atomic_cap = attr->atomic_cap;
+ resp->max_ee = attr->max_ee;
+ resp->max_rdd = attr->max_rdd;
+ resp->max_mw = attr->max_mw;
+ resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
+ resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
+ resp->max_mcast_grp = attr->max_mcast_grp;
+ resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
+ resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
+ resp->max_ah = attr->max_ah;
+ resp->max_fmr = attr->max_fmr;
+ resp->max_map_per_fmr = attr->max_map_per_fmr;
+ resp->max_srq = attr->max_srq;
+ resp->max_srq_wr = attr->max_srq_wr;
+ resp->max_srq_sge = attr->max_srq_sge;
+ resp->max_pkeys = attr->max_pkeys;
+ resp->local_ca_ack_delay = attr->local_ca_ack_delay;
+ resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+}
+
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
const char __user *buf,
int in_len, int out_len)
@@ -398,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return ret;
memset(&resp, 0, sizeof resp);
-
- resp.fw_ver = attr.fw_ver;
- resp.node_guid = file->device->ib_dev->node_guid;
- resp.sys_image_guid = attr.sys_image_guid;
- resp.max_mr_size = attr.max_mr_size;
- resp.page_size_cap = attr.page_size_cap;
- resp.vendor_id = attr.vendor_id;
- resp.vendor_part_id = attr.vendor_part_id;
- resp.hw_ver = attr.hw_ver;
- resp.max_qp = attr.max_qp;
- resp.max_qp_wr = attr.max_qp_wr;
- resp.device_cap_flags = attr.device_cap_flags;
- resp.max_sge = attr.max_sge;
- resp.max_sge_rd = attr.max_sge_rd;
- resp.max_cq = attr.max_cq;
- resp.max_cqe = attr.max_cqe;
- resp.max_mr = attr.max_mr;
- resp.max_pd = attr.max_pd;
- resp.max_qp_rd_atom = attr.max_qp_rd_atom;
- resp.max_ee_rd_atom = attr.max_ee_rd_atom;
- resp.max_res_rd_atom = attr.max_res_rd_atom;
- resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
- resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
- resp.atomic_cap = attr.atomic_cap;
- resp.max_ee = attr.max_ee;
- resp.max_rdd = attr.max_rdd;
- resp.max_mw = attr.max_mw;
- resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
- resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
- resp.max_mcast_grp = attr.max_mcast_grp;
- resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
- resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
- resp.max_ah = attr.max_ah;
- resp.max_fmr = attr.max_fmr;
- resp.max_map_per_fmr = attr.max_map_per_fmr;
- resp.max_srq = attr.max_srq;
- resp.max_srq_wr = attr.max_srq_wr;
- resp.max_srq_sge = attr.max_srq_sge;
- resp.max_pkeys = attr.max_pkeys;
- resp.local_ca_ack_delay = attr.local_ca_ack_delay;
- resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+ copy_query_dev_fields(file, &resp, &attr);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
@@ -947,6 +975,18 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
goto err_free;
}
+ if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
+ struct ib_device_attr attr;
+
+ ret = ib_query_device(pd->device, &attr);
+ if (ret || !(attr.device_cap_flags &
+ IB_DEVICE_ON_DEMAND_PAGING)) {
+ pr_debug("ODP support not available\n");
+ ret = -EINVAL;
+ goto err_put;
+ }
+ }
+
mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
cmd.access_flags, &udata);
if (IS_ERR(mr)) {
@@ -3253,3 +3293,52 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
return ret ? ret : in_len;
}
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_query_device_resp resp;
+ struct ib_uverbs_ex_query_device cmd;
+ struct ib_device_attr attr;
+ struct ib_device *device;
+ int err;
+
+ device = file->device->ib_dev;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd.reserved)
+ return -EINVAL;
+
+ err = device->query_device(device, &attr);
+ if (err)
+ return err;
+
+ memset(&resp, 0, sizeof(resp));
+ copy_query_dev_fields(file, &resp.base, &attr);
+ resp.comp_mask = 0;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
+ resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+ resp.odp_caps.per_transport_caps.rc_odp_caps =
+ attr.odp_caps.per_transport_caps.rc_odp_caps;
+ resp.odp_caps.per_transport_caps.uc_odp_caps =
+ attr.odp_caps.per_transport_caps.uc_odp_caps;
+ resp.odp_caps.per_transport_caps.ud_odp_caps =
+ attr.odp_caps.per_transport_caps.ud_odp_caps;
+ resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
+ }
+#endif
+
+ err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
+ if (err)
+ return err;
+
+ return 0;
+}
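From userspace, this new extended command surfaces through the extended query-device verb; a hedged sketch checking the ODP capability bits this handler fills into resp.odp_caps, assuming a libibverbs with the extended-verbs API:

#include <infiniband/verbs.h>

/* Returns non-zero if the device reports general ODP support. */
int has_odp(struct ibv_context *ctx)
{
	struct ibv_device_attr_ex attr;

	if (ibv_query_device_ex(ctx, NULL, &attr))
		return 0;
	return !!(attr.odp_caps.general_caps & IBV_ODP_SUPPORT);
}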
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 71ab83fde472..e6c23b9eab33 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -122,7 +122,8 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
struct ib_udata *ucore,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
- [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow
+ [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
+ [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -296,6 +297,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uobj);
}
+ put_pid(context->tgid);
+
return context->device->dealloc_ucontext(context);
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c2b89cc5dbca..f93eb8da7b5a 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -879,7 +879,8 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
- qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
+ if (!(*qp_attr_mask & IB_QP_VID))
+ qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
} else {
ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 2d5cbf4363e4..bdf3507810cb 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -476,7 +476,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
c2mr->umem->page_size,
i,
length,
- c2mr->umem->offset,
+ ib_umem_offset(c2mr->umem),
&kva,
c2_convert_access(acc),
c2mr);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 4b8c6116c058..9edc200b311d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1640,7 +1640,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
__state_set(&ep->com, MPA_REQ_RCVD);
/* drive upcall */
- mutex_lock(&ep->parent_ep->com.mutex);
+ mutex_lock_nested(&ep->parent_ep->com.mutex,
+ SINGLE_DEPTH_NESTING);
if (ep->parent_ep->com.state != DEAD) {
if (connect_request_upcall(ep))
abort_connection(ep, skb, GFP_KERNEL);
@@ -3126,6 +3127,8 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
&ep->com.wr_wait,
0, 0, __func__);
+ else if (err > 0)
+ err = net_xmit_errno(err);
if (err)
pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
err, ep->stid,
@@ -3159,6 +3162,8 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
&ep->com.wr_wait,
0, 0, __func__);
+ else if (err > 0)
+ err = net_xmit_errno(err);
}
if (err)
pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 72f1f052e88c..eb5df4e62703 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -670,7 +670,7 @@ static int ep_open(struct inode *inode, struct file *file)
idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
spin_unlock_irq(&epd->devp->lock);
- epd->bufsize = count * 160;
+ epd->bufsize = count * 240;
epd->buf = vmalloc(epd->bufsize);
if (!epd->buf) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 0744455cd88b..cb43c2299ac0 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -50,6 +50,13 @@ static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
+static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
+{
+ return (is_t4(dev->rdev.lldi.adapter_type) ||
+ is_t5(dev->rdev.lldi.adapter_type)) &&
+ length >= 8*1024*1024*1024ULL;
+}
+
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
u32 len, dma_addr_t data, int wait)
{
@@ -369,9 +376,11 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
int ret;
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
- FW_RI_STAG_NSMR, mhp->attr.perms,
+ FW_RI_STAG_NSMR, mhp->attr.len ?
+ mhp->attr.perms : 0,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
- mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.va_fbo, mhp->attr.len ?
+ mhp->attr.len : -1, shift - 12,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret)
return ret;
@@ -536,6 +545,11 @@ int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
return ret;
}
+ if (mr_exceeds_hw_limits(rhp, total_size)) {
+ kfree(page_list);
+ return -EINVAL;
+ }
+
ret = reregister_mem(rhp, php, &mh, shift, npages);
kfree(page_list);
if (ret)
@@ -596,6 +610,12 @@ struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
if (ret)
goto err;
+ if (mr_exceeds_hw_limits(rhp, total_size)) {
+ kfree(page_list);
+ ret = -EINVAL;
+ goto err;
+ }
+
ret = alloc_pbl(mhp, npages);
if (ret) {
kfree(page_list);
@@ -699,6 +719,10 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
php = to_c4iw_pd(pd);
rhp = php->rhp;
+
+ if (mr_exceeds_hw_limits(rhp, length))
+ return ERR_PTR(-EINVAL);
+
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 2ed3ece2b2ee..bb85d479e66e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1538,9 +1538,9 @@ err:
set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
abort = 1;
- wake_up(&qhp->wait);
BUG_ON(!ep);
flush_qp(qhp);
+ wake_up(&qhp->wait);
out:
mutex_unlock(&qhp->mutex);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 3488e8c9fcb4..f914b30999f8 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -399,7 +399,7 @@ reg_user_mr_fallback:
pginfo.num_kpages = num_kpages;
pginfo.num_hwpages = num_hwpages;
pginfo.u.usr.region = e_mr->umem;
- pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
+ pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 5e61e9bff697..c7278f6a8217 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -214,7 +214,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.user_base = start;
mr->mr.iova = virt_addr;
mr->mr.length = length;
- mr->mr.offset = umem->offset;
+ mr->mr.offset = ib_umem_offset(umem);
mr->mr.access_flags = mr_access_flags;
mr->mr.max_segs = n;
mr->umem = umem;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 8f9325cfc85d..c36ccbd9a644 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -223,7 +223,6 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
if (flags & IB_MR_REREG_TRANS) {
int shift;
- int err;
int n;
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 4ea0135af484..27a70159e2ea 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
+mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 1ba6c42e4df8..8a87404e9c76 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -244,6 +244,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
+ props->odp_caps = dev->odp_caps;
+#endif
+
out:
kfree(in_mad);
kfree(out_mad);
@@ -568,6 +574,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
goto out_count;
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
+#endif
+
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -858,7 +868,7 @@ static ssize_t show_reg_pages(struct device *device,
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages);
+ return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -1321,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
(1ull << IB_USER_VERBS_CMD_OPEN_QP);
+ dev->ib_dev.uverbs_ex_cmd_mask =
+ (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
@@ -1366,6 +1378,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
+ mlx5_ib_internal_query_odp_caps(dev);
+
if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -1379,16 +1393,19 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
goto err_eqs;
mutex_init(&dev->cap_mask_mutex);
- spin_lock_init(&dev->mr_lock);
err = create_dev_resources(&dev->devr);
if (err)
goto err_eqs;
- err = ib_register_device(&dev->ib_dev, NULL);
+ err = mlx5_ib_odp_init_one(dev);
if (err)
goto err_rsrc;
+ err = ib_register_device(&dev->ib_dev, NULL);
+ if (err)
+ goto err_odp;
+
err = create_umr_res(dev);
if (err)
goto err_dev;
@@ -1410,6 +1427,9 @@ err_umrc:
err_dev:
ib_unregister_device(&dev->ib_dev);
+err_odp:
+ mlx5_ib_odp_remove_one(dev);
+
err_rsrc:
destroy_dev_resources(&dev->devr);
@@ -1425,8 +1445,10 @@ err_dealloc:
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
struct mlx5_ib_dev *dev = context;
+
ib_unregister_device(&dev->ib_dev);
destroy_umrc_res(dev);
+ mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
free_comp_eqs(dev);
ib_dealloc_device(&dev->ib_dev);
@@ -1440,15 +1462,30 @@ static struct mlx5_interface mlx5_ib_interface = {
static int __init mlx5_ib_init(void)
{
+ int err;
+
if (deprecated_prof_sel != 2)
pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
- return mlx5_register_interface(&mlx5_ib_interface);
+ err = mlx5_ib_odp_init();
+ if (err)
+ return err;
+
+ err = mlx5_register_interface(&mlx5_ib_interface);
+ if (err)
+ goto clean_odp;
+
+ return err;
+
+clean_odp:
+ mlx5_ib_odp_cleanup();
+ return err;
}
static void __exit mlx5_ib_cleanup(void)
{
mlx5_unregister_interface(&mlx5_ib_interface);
+ mlx5_ib_odp_cleanup();
}
module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index dae07eae9507..b56e4c5593ee 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
/* @umem: umem object to scan
@@ -57,6 +58,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int entry;
unsigned long page_shift = ilog2(umem->page_size);
+ /* With ODP we must always match OS page size. */
+ if (umem->odp_data) {
+ *count = ib_umem_page_count(umem);
+ *shift = PAGE_SHIFT;
+ *ncont = *count;
+ if (order)
+ *order = ilog2(roundup_pow_of_two(*count));
+
+ return;
+ }
+
addr = addr >> page_shift;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, sizeof(tmp));
@@ -108,8 +120,36 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
*count = i;
}
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int umr)
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+ u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+ if (umem_dma & ODP_READ_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_READ;
+ if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_WRITE;
+
+ return mtt_entry;
+}
+#endif
+
+/*
+ * Populate the given array with bus addresses from the umem.
+ *
+ * dev - mlx5_ib device
+ * umem - umem to use to fill the pages
+ * page_shift - determines the page size used in the resulting array
+ * offset - offset into the umem to start from,
+ * only implemented for ODP umems
+ * num_pages - total number of pages to fill
+ * pas - bus addresses array to fill
+ * access_flags - access flags to set on all present pages;
+ * use enum mlx5_ib_mtt_access_flags for this.
+ */
+void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, size_t offset, size_t num_pages,
+ __be64 *pas, int access_flags)
{
unsigned long umem_page_shift = ilog2(umem->page_size);
int shift = page_shift - umem_page_shift;
@@ -120,6 +160,21 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int len;
struct scatterlist *sg;
int entry;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ const bool odp = umem->odp_data != NULL;
+
+ if (odp) {
+ WARN_ON(shift != 0);
+ WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
+
+ for (i = 0; i < num_pages; ++i) {
+ dma_addr_t pa = umem->odp_data->dma_list[offset + i];
+
+ pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ }
+ return;
+ }
+#endif
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
@@ -128,8 +183,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
cur = base + (k << umem_page_shift);
- if (umr)
- cur |= 3;
+ cur |= access_flags;
pas[i >> shift] = cpu_to_be64(cur);
mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
@@ -142,6 +196,13 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
}
}
+void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, __be64 *pas, int access_flags)
+{
+ return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
+ ib_umem_num_pages(umem), pas,
+ access_flags);
+}
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
u64 page_size;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 386780f0d1e1..83f22fe297c8 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -111,6 +111,8 @@ struct mlx5_ib_pd {
*/
#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
+#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
+#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
@@ -147,6 +149,29 @@ enum {
MLX5_QP_EMPTY
};
+/*
+ * Connect-IB can trigger up to four concurrent pagefaults
+ * per-QP.
+ */
+enum mlx5_ib_pagefault_context {
+ MLX5_IB_PAGEFAULT_RESPONDER_READ,
+ MLX5_IB_PAGEFAULT_REQUESTOR_READ,
+ MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
+ MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
+ MLX5_IB_PAGEFAULT_CONTEXTS
+};
+
+static inline enum mlx5_ib_pagefault_context
+ mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
+{
+ return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
+}
+
+struct mlx5_ib_pfault {
+ struct work_struct work;
+ struct mlx5_pagefault mpfault;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
struct mlx5_core_qp mqp;
@@ -192,6 +217,21 @@ struct mlx5_ib_qp {
/* Store signature errors */
bool signature_en;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /*
+ * A flag that is true for QPs that are in a state that doesn't
+ * allow page faults, and shouldn't schedule any more faults.
+ */
+ int disable_page_faults;
+ /*
+ * The disable_page_faults_lock protects a QP's disable_page_faults
+ * field, allowing for a thread to atomically check whether the QP
+ * allows page faults, and if so schedule a page fault.
+ */
+ spinlock_t disable_page_faults_lock;
+ struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
+#endif
};
struct mlx5_ib_cq_buf {
@@ -206,6 +246,19 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
};
+struct mlx5_umr_wr {
+ union {
+ u64 virt_addr;
+ u64 offset;
+ } target;
+ struct ib_pd *pd;
+ unsigned int page_shift;
+ unsigned int npages;
+ u32 length;
+ int access_flags;
+ u32 mkey;
+};
+
struct mlx5_shared_mr_info {
int mr_id;
struct ib_umem *umem;
@@ -253,6 +306,13 @@ struct mlx5_ib_xrcd {
u32 xrcdn;
};
+enum mlx5_ib_mtt_access_flags {
+ MLX5_IB_MTT_READ = (1 << 0),
+ MLX5_IB_MTT_WRITE = (1 << 1),
+};
+
+#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_core_mr mmr;
@@ -261,12 +321,11 @@ struct mlx5_ib_mr {
struct list_head list;
int order;
int umred;
- __be64 *pas;
- dma_addr_t dma;
int npages;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
struct mlx5_core_sig_ctx *sig;
+ int live;
};
struct mlx5_ib_fast_reg_page_list {
@@ -372,11 +431,18 @@ struct mlx5_ib_dev {
struct umr_common umrc;
/* sync used page count stats
*/
- spinlock_t mr_lock;
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
int fill_delay;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct ib_odp_caps odp_caps;
+ /*
+ * Sleepable RCU that prevents destruction of MRs while they are still
+ * being used by a page fault handler.
+ */
+ struct srcu_struct mr_srcu;
+#endif
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -490,6 +556,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+ void *buffer, u32 length);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
int vector, struct ib_ucontext *context,
struct ib_udata *udata);
@@ -502,6 +570,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
+ int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
@@ -533,8 +603,11 @@ int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int *ncont, int *order);
+void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, size_t offset, size_t num_pages,
+ __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int umr);
+ int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
@@ -544,6 +617,38 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+extern struct workqueue_struct *mlx5_ib_page_fault_wq;
+
+int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev);
+void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault);
+void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
+int __init mlx5_ib_odp_init(void);
+void mlx5_ib_odp_cleanup(void);
+void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
+void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
+void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+ unsigned long end);
+
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
+static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
+static inline int mlx5_ib_odp_init(void) { return 0; }
+static inline void mlx5_ib_odp_cleanup(void) {}
+static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
+static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
+
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -561,4 +666,7 @@ static inline u8 convert_access(int acc)
MLX5_PERM_LOCAL_READ;
}
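+/* Upper bound on the number of pages a single UMR operation can address. */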
+#define MLX5_MAX_UMR_SHIFT 16
+#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
+
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5a80dd993761..32a28bd50b20 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -37,21 +37,34 @@
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
enum {
MAX_PENDING_REG_MR = 8,
};
-enum {
- MLX5_UMR_ALIGN = 2048
-};
+#define MLX5_UMR_ALIGN 2048
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
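+/*
+ * A statically allocated fallback buffer for MTT updates, used when a
+ * page cannot be allocated with GFP_ATOMIC during an invalidation;
+ * access to it is serialized by the mutex below.
+ */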
+static __be64 mlx5_ib_update_mtt_emergency_buffer[
+ MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
+ __aligned(MLX5_UMR_ALIGN);
+static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
+#endif
+
+static int clean_mr(struct mlx5_ib_mr *mr);
-static __be64 *mr_align(__be64 *ptr, int align)
+static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- unsigned long mask = align - 1;
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
- return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /* Wait until all page fault handlers using the mr complete. */
+ synchronize_srcu(&dev->mr_srcu);
+#endif
+
+ return err;
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
@@ -146,7 +159,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
mr->order = ent->order;
mr->umred = 1;
mr->dev = dev;
- in->seg.status = 1 << 6;
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
@@ -191,7 +204,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
ent->cur--;
ent->size--;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
else
@@ -482,7 +495,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->cur--;
ent->size--;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
else
@@ -668,7 +681,7 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
static int use_umr(int order)
{
- return order <= 17;
+ return order <= MLX5_MAX_UMR_SHIFT;
}
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
@@ -678,6 +691,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_mr *mr = dev->umrc.mr;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
sg->addr = dma;
sg->length = ALIGN(sizeof(u64) * n, 64);
@@ -692,21 +706,24 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
wr->num_sge = 0;
wr->opcode = MLX5_IB_WR_UMR;
- wr->wr.fast_reg.page_list_len = n;
- wr->wr.fast_reg.page_shift = page_shift;
- wr->wr.fast_reg.rkey = key;
- wr->wr.fast_reg.iova_start = virt_addr;
- wr->wr.fast_reg.length = len;
- wr->wr.fast_reg.access_flags = access_flags;
- wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
+
+ umrwr->npages = n;
+ umrwr->page_shift = page_shift;
+ umrwr->mkey = key;
+ umrwr->target.virt_addr = virt_addr;
+ umrwr->length = len;
+ umrwr->access_flags = access_flags;
+ umrwr->pd = pd;
}
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
struct ib_send_wr *wr, u32 key)
{
- wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+
+ wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
wr->opcode = MLX5_IB_WR_UMR;
- wr->wr.fast_reg.rkey = key;
+ umrwr->mkey = key;
}
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
@@ -742,7 +759,10 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
struct ib_send_wr wr, *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
- int size = sizeof(u64) * npages;
+ int size;
+ __be64 *mr_pas;
+ __be64 *pas;
+ dma_addr_t dma;
int err = 0;
int i;
@@ -761,25 +781,31 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
if (!mr)
return ERR_PTR(-EAGAIN);
- mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
- if (!mr->pas) {
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the pas array, we allocate
+ * a little more. */
+ size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
+ mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+ if (!mr_pas) {
err = -ENOMEM;
goto free_mr;
}
- mlx5_ib_populate_pas(dev, umem, page_shift,
- mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+ pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
+ mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the actual pages. */
+ memset(pas + npages, 0, size - npages * sizeof(u64));
- mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, mr->dma)) {
+ dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
err = -ENOMEM;
goto free_pas;
}
memset(&wr, 0, sizeof(wr));
wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
+ prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
+ virt_addr, len, access_flags);
mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
@@ -799,12 +825,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
mr->mmr.size = len;
mr->mmr.pd = to_mpd(pd)->pdn;
+ mr->live = 1;
+
unmap_dma:
up(&umrc->sem);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+ dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
free_pas:
- kfree(mr->pas);
+ kfree(mr_pas);
free_mr:
if (err) {
@@ -815,6 +843,128 @@ free_mr:
return mr;
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
+ int zap)
+{
+ struct mlx5_ib_dev *dev = mr->dev;
+ struct device *ddev = dev->ib_dev.dma_device;
+ struct umr_common *umrc = &dev->umrc;
+ struct mlx5_ib_umr_context umr_context;
+ struct ib_umem *umem = mr->umem;
+ int size;
+ __be64 *pas;
+ dma_addr_t dma;
+ struct ib_send_wr wr, *bad;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
+ struct ib_sge sg;
+ int err = 0;
+ const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
+ const int page_index_mask = page_index_alignment - 1;
+ size_t pages_mapped = 0;
+ size_t pages_to_map = 0;
+ size_t pages_iter = 0;
+ int use_emergency_buf = 0;
+
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
+ * so we need to align the offset and length accordingly. */
+ if (start_page_index & page_index_mask) {
+ npages += start_page_index & page_index_mask;
+ start_page_index &= ~page_index_mask;
+ }
+
+ pages_to_map = ALIGN(npages, page_index_alignment);
+
+ if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
+ return -EINVAL;
+
+ size = sizeof(u64) * pages_to_map;
+ size = min_t(int, PAGE_SIZE, size);
+ /* We allocate with GFP_ATOMIC to avoid recursion into the
+ * page-reclaim code when we are called from an invalidation.
+ * The pas buffer must be 2k-aligned for Connect-IB. */
+ pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
+ if (!pas) {
+ mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
+ pas = mlx5_ib_update_mtt_emergency_buffer;
+ size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
+ use_emergency_buf = 1;
+ mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
+ memset(pas, 0, size);
+ }
+ pages_iter = size / sizeof(u64);
+ dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
+ err = -ENOMEM;
+ goto free_pas;
+ }
+
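+	/*
+	 * Copy the MTTs to the device chunk by chunk: fill the bounce
+	 * buffer, sync it for the device, post the UMR and wait for its
+	 * completion before moving on to the next chunk.
+	 */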
+ for (pages_mapped = 0;
+ pages_mapped < pages_to_map && !err;
+ pages_mapped += pages_iter, start_page_index += pages_iter) {
+ dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
+
+ npages = min_t(size_t,
+ pages_iter,
+ ib_umem_num_pages(umem) - start_page_index);
+
+ if (!zap) {
+ __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
+ start_page_index, npages, pas,
+ MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the pages brought from the
+ * umem. */
+ memset(pas + npages, 0, size - npages * sizeof(u64));
+ }
+
+ dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
+
+ memset(&wr, 0, sizeof(wr));
+ wr.wr_id = (u64)(unsigned long)&umr_context;
+
+ sg.addr = dma;
+ sg.length = ALIGN(npages * sizeof(u64),
+ MLX5_UMR_MTT_ALIGNMENT);
+ sg.lkey = dev->umrc.mr->lkey;
+
+ wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
+ MLX5_IB_SEND_UMR_UPDATE_MTT;
+ wr.sg_list = &sg;
+ wr.num_sge = 1;
+ wr.opcode = MLX5_IB_WR_UMR;
+ umrwr->npages = sg.length / sizeof(u64);
+ umrwr->page_shift = PAGE_SHIFT;
+ umrwr->mkey = mr->mmr.key;
+ umrwr->target.offset = start_page_index;
+
+ mlx5_ib_init_umr_context(&umr_context);
+ down(&umrc->sem);
+ err = ib_post_send(umrc->qp, &wr, &bad);
+ if (err) {
+ mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
+ } else {
+ wait_for_completion(&umr_context.done);
+ if (umr_context.status != IB_WC_SUCCESS) {
+ mlx5_ib_err(dev, "UMR completion failed, code %d\n",
+ umr_context.status);
+ err = -EFAULT;
+ }
+ }
+ up(&umrc->sem);
+ }
+ dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
+
+free_pas:
+ if (!use_emergency_buf)
+ free_page((unsigned long)pas);
+ else
+ mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
+
+ return err;
+}
+#endif
+
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
u64 length, struct ib_umem *umem,
int npages, int page_shift,
@@ -825,6 +975,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
struct mlx5_ib_mr *mr;
int inlen;
int err;
+ bool pg_cap = !!(dev->mdev->caps.gen.flags &
+ MLX5_DEV_CAP_FLAG_ON_DMND_PG);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -836,8 +988,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
err = -ENOMEM;
goto err_1;
}
- mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);
+ mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+ pg_cap ? MLX5_IB_MTT_PRESENT : 0);
+ /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+ * in the page list submitted with the command. */
+ in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
in->seg.flags = convert_access(access_flags) |
MLX5_ACCESS_MODE_MTT;
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
@@ -856,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
goto err_2;
}
mr->umem = umem;
+ mr->live = 1;
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
@@ -910,6 +1067,10 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "cache empty for order %d", order);
mr = NULL;
}
+ } else if (access_flags & IB_ACCESS_ON_DEMAND) {
+ err = -EINVAL;
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+ goto error;
}
if (!mr)
@@ -925,16 +1086,51 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->umem = umem;
mr->npages = npages;
- spin_lock(&dev->mr_lock);
- dev->mdev->priv.reg_pages += npages;
- spin_unlock(&dev->mr_lock);
+ atomic_add(npages, &dev->mdev->priv.reg_pages);
mr->ibmr.lkey = mr->mmr.key;
mr->ibmr.rkey = mr->mmr.key;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (umem->odp_data) {
+ /*
+ * This barrier prevents the compiler from moving the
+ * setting of umem->odp_data->private to point to our
+ * MR before reg_umr has finished, ensuring that the MR
+ * initialization has finished before we start handling
+ * invalidations.
+ */
+ smp_wmb();
+ mr->umem->odp_data->private = mr;
+ /*
+ * Make sure the new umem->odp_data->private value is
+ * visible to the invalidation routines before page
+ * faults can occur on the MR. Page faults can happen
+ * once we put the MR in the tree, below this line.
+ * Without the barrier, a fault could be handled and an
+ * invalidation could run before umem->odp_data->private
+ * == mr is visible to the invalidation handler.
+ */
+ smp_wmb();
+ }
+#endif
+
return &mr->ibmr;
error:
+ /*
+ * Destroy the umem *before* destroying the MR, to ensure we
+ * will not have any in-flight notifiers when destroying the
+ * MR.
+ *
+ * As the MR is completely invalid to begin with, and this
+ * error path is only taken if we can't push the mr entry into
+ * the pagefault tree, this is safe.
+ */
+
ib_umem_release(umem);
+ /* Kill the MR, and return an error code. */
+ clean_mr(mr);
return ERR_PTR(err);
}
@@ -971,17 +1167,14 @@ error:
return err;
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int clean_mr(struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
- struct mlx5_ib_mr *mr = to_mmr(ibmr);
- struct ib_umem *umem = mr->umem;
- int npages = mr->npages;
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
int umred = mr->umred;
int err;
if (!umred) {
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
@@ -996,15 +1189,47 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
free_cached_mr(dev, mr);
}
- if (umem) {
+ if (!umred)
+ kfree(mr);
+
+ return 0;
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ int npages = mr->npages;
+ struct ib_umem *umem = mr->umem;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (umem && umem->odp_data) {
+ /* Prevent new page faults from succeeding */
+ mr->live = 0;
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&dev->mr_srcu);
+ /* Destroy all page mappings */
+ mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
+ ib_umem_end(umem));
+ /*
+ * We kill the umem before the MR for ODP,
+ * so that there will not be any invalidations in
+ * flight, looking at the *mr struct.
+ */
ib_umem_release(umem);
- spin_lock(&dev->mr_lock);
- dev->mdev->priv.reg_pages -= npages;
- spin_unlock(&dev->mr_lock);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
+
+ /* Avoid double-freeing the umem. */
+ umem = NULL;
}
+#endif
- if (!umred)
- kfree(mr);
+ clean_mr(mr);
+
+ if (umem) {
+ ib_umem_release(umem);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ }
return 0;
}
@@ -1028,7 +1253,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
goto err_free;
}
- in->seg.status = 1 << 6; /* free */
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32(ndescs);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
@@ -1113,7 +1338,7 @@ int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
kfree(mr->sig);
}
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
@@ -1143,7 +1368,7 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
goto err_free;
}
- in->seg.status = 1 << 6; /* free */
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
new file mode 100644
index 000000000000..a2c541c4809a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+
+#include "mlx5_ib.h"
+
+#define MAX_PREFETCH_LEN (4*1024*1024U)
+
+/* Timeout in ms to wait for an active mmu notifier to complete when handling
+ * a pagefault. */
+#define MMU_NOTIFIER_TIMEOUT 1000
+
+struct workqueue_struct *mlx5_ib_page_fault_wq;
+
+void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+ unsigned long end)
+{
+ struct mlx5_ib_mr *mr;
+ const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
+ u64 idx = 0, blk_start_idx = 0;
+ int in_block = 0;
+ u64 addr;
+
+ if (!umem || !umem->odp_data) {
+ pr_err("invalidation called on NULL umem or non-ODP umem\n");
+ return;
+ }
+
+ mr = umem->odp_data->private;
+
+ if (!mr || !mr->ibmr.pd)
+ return;
+
+ start = max_t(u64, ib_umem_start(umem), start);
+ end = min_t(u64, ib_umem_end(umem), end);
+
+ /*
+ * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
+ * while we are doing the invalidation, no page fault will attempt to
+ * overwrite the same MTTs. Concurrent invalidations might race us,
+ * but they will write 0s as well, so no difference in the end result.
+ */
+
+ for (addr = start; addr < end; addr += (u64)umem->page_size) {
+ idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
+ /*
+ * Strive to write the MTTs in chunks, but avoid overwriting
+ * non-existing MTTs. The heuristic here can be improved to
+ * estimate the cost of another UMR vs. the cost of a bigger
+ * UMR.
+ */
+ if (umem->odp_data->dma_list[idx] &
+ (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (!in_block) {
+ blk_start_idx = idx;
+ in_block = 1;
+ }
+ } else {
+ u64 umr_offset = idx & umr_block_mask;
+
+ if (in_block && umr_offset == 0) {
+ mlx5_ib_update_mtt(mr, blk_start_idx,
+ idx - blk_start_idx, 1);
+ in_block = 0;
+ }
+ }
+ }
+ if (in_block)
+ mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
+ 1);
+
+ /*
+ * We are now sure that the device will not access the
+ * memory. We can safely unmap it, and mark it as dirty if
+ * needed.
+ */
+
+ ib_umem_odp_unmap_dma_pages(umem, start, end);
+}
+
+#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do { \
+ if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name) \
+ ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name; \
+} while (0)
+
+int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+{
+ int err;
+ struct mlx5_odp_caps hw_caps;
+ struct ib_odp_caps *caps = &dev->odp_caps;
+
+ memset(caps, 0, sizeof(*caps));
+
+ if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
+ return 0;
+
+ err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
+ if (err)
+ goto out;
+
+ caps->general_caps = IB_ODP_SUPPORT;
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps,
+ SEND);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ SEND);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ RECV);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ WRITE);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ READ);
+
+out:
+ return err;
+}
+
+static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
+ u32 key)
+{
+ u32 base_key = mlx5_base_mkey(key);
+ struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);
+ struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);
+
+ if (!mmr || mmr->key != key || !mr->live)
+ return NULL;
+
+ return mr;
+}
+
+static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault,
+ int error)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
+ pfault->mpfault.flags,
+ error);
+ if (ret)
+ pr_err("Failed to resolve the page fault on QP 0x%x\n",
+ qp->mqp.qpn);
+}
+
+/*
+ * Handle a single data segment in a page-fault WQE.
+ *
+ * Returns number of pages retrieved on success. The caller will continue to
+ * the next data segment.
+ * Can return the following error codes:
+ * -EAGAIN to designate a temporary error. The caller will abort handling the
+ * page fault and resolve it.
+ * -EFAULT when there's an error mapping the requested pages. The caller will
+ * abort the page fault handling and possibly move the QP to an error state.
+ * On other errors the QP should also be closed with an error.
+ */
+static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault,
+ u32 key, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped)
+{
+ struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
+ int srcu_key;
+ unsigned int current_seq;
+ u64 start_idx;
+ int npages = 0, ret = 0;
+ struct mlx5_ib_mr *mr;
+ u64 access_mask = ODP_READ_ALLOWED_BIT;
+
+ srcu_key = srcu_read_lock(&mib_dev->mr_srcu);
+ mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
+ /*
+ * If we didn't find the MR, it means the MR was closed while we were
+ * handling the ODP event. In this case we return -EFAULT so that the
+ * QP will be closed.
+ */
+ if (!mr || !mr->ibmr.pd) {
+ pr_err("Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
+ key);
+ ret = -EFAULT;
+ goto srcu_unlock;
+ }
+ if (!mr->umem->odp_data) {
+ pr_debug("skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped +=
+ (bcnt - pfault->mpfault.bytes_committed);
+ goto srcu_unlock;
+ }
+ if (mr->ibmr.pd != qp->ibqp.pd) {
+ pr_err("Page-fault with different PDs for QP and MR.\n");
+ ret = -EFAULT;
+ goto srcu_unlock;
+ }
+
+ current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
+ /*
+ * Ensure the sequence number is valid for some time before we call
+ * gup.
+ */
+ smp_rmb();
+
+ /*
+ * Avoid branches - this code will perform correctly
+ * in all iterations (in iteration 2 and above,
+ * bytes_committed == 0).
+ */
+ io_virt += pfault->mpfault.bytes_committed;
+ bcnt -= pfault->mpfault.bytes_committed;
+
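+	/* The index of the first page of this data segment within the MR. */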
+ start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
+
+ if (mr->umem->writable)
+ access_mask |= ODP_WRITE_ALLOWED_BIT;
+ npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
+ access_mask, current_seq);
+ if (npages < 0) {
+ ret = npages;
+ goto srcu_unlock;
+ }
+
+ if (npages > 0) {
+ mutex_lock(&mr->umem->odp_data->umem_mutex);
+ if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
+ /*
+ * No need to check whether the MTTs really belong to
+ * this MR, since ib_umem_odp_map_dma_pages already
+ * checks this.
+ */
+ ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
+ } else {
+ ret = -EAGAIN;
+ }
+ mutex_unlock(&mr->umem->odp_data->umem_mutex);
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ pr_err("Failed to update mkey page tables\n");
+ goto srcu_unlock;
+ }
+
+ if (bytes_mapped) {
+ u32 new_mappings = npages * PAGE_SIZE -
+ (io_virt - round_down(io_virt, PAGE_SIZE));
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
+ }
+ }
+
+srcu_unlock:
+ if (ret == -EAGAIN) {
+ if (!mr->umem->odp_data->dying) {
+ struct ib_umem_odp *odp_data = mr->umem->odp_data;
+ unsigned long timeout =
+ msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+ if (!wait_for_completion_timeout(
+ &odp_data->notifier_completion,
+ timeout)) {
+ pr_warn("timeout waiting for mmu notifier completion\n");
+ }
+ } else {
+ /* The MR is being killed, kill the QP as well. */
+ ret = -EFAULT;
+ }
+ }
+ srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
+ pfault->mpfault.bytes_committed = 0;
+ return ret ? ret : npages;
+}
+
+/**
+ * pagefault_data_segments() - Parse a series of data segments for page
+ * fault handling.
+ *
+ * @qp: the QP on which the fault occurred.
+ * @pfault: contains page fault information.
+ * @wqe: points at the first data segment in the WQE.
+ * @wqe_end: points after the end of the WQE.
+ * @bytes_mapped: receives the number of bytes that the function was able to
+ * map. This allows the caller to decide intelligently whether
+ * enough memory was mapped to resolve the page fault
+ * successfully (e.g. enough for the next MTU, or the entire
+ * WQE).
+ * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
+ * the committed bytes).
+ * @receive_queue: non-zero when parsing a receive-queue WQE.
+ *
+ * Returns the number of pages loaded if positive, zero for an empty WQE, or a
+ * negative error code.
+ */
+static int pagefault_data_segments(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault, void *wqe,
+ void *wqe_end, u32 *bytes_mapped,
+ u32 *total_wqe_bytes, int receive_queue)
+{
+ int ret = 0, npages = 0;
+ u64 io_virt;
+ u32 key;
+ u32 byte_count;
+ size_t bcnt;
+ int inline_segment;
+
+ /* Skip SRQ next-WQE segment. */
+ if (receive_queue && qp->ibqp.srq)
+ wqe += sizeof(struct mlx5_wqe_srq_next_seg);
+
+ if (bytes_mapped)
+ *bytes_mapped = 0;
+ if (total_wqe_bytes)
+ *total_wqe_bytes = 0;
+
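+	/*
+	 * Walk the data segments. Inline segments and segments that are
+	 * already fully committed only consume bytes_committed; anything
+	 * else is faulted in via pagefault_single_data_segment.
+	 */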
+ while (wqe < wqe_end) {
+ struct mlx5_wqe_data_seg *dseg = wqe;
+
+ io_virt = be64_to_cpu(dseg->addr);
+ key = be32_to_cpu(dseg->lkey);
+ byte_count = be32_to_cpu(dseg->byte_count);
+ inline_segment = !!(byte_count & MLX5_INLINE_SEG);
+ bcnt = byte_count & ~MLX5_INLINE_SEG;
+
+ if (inline_segment) {
+ bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
+ wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
+ 16);
+ } else {
+ wqe += sizeof(*dseg);
+ }
+
+ /* A zeroed data segment marks the end of a receive WQE's
+ * sg list. */
+ if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
+ io_virt == 0)
+ break;
+
+ if (!inline_segment && total_wqe_bytes) {
+ *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
+ pfault->mpfault.bytes_committed);
+ }
+
+ /* A zero length data segment designates a length of 2GB. */
+ if (bcnt == 0)
+ bcnt = 1U << 31;
+
+ if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
+ pfault->mpfault.bytes_committed -=
+ min_t(size_t, bcnt,
+ pfault->mpfault.bytes_committed);
+ continue;
+ }
+
+ ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
+ bcnt, bytes_mapped);
+ if (ret < 0)
+ break;
+ npages += ret;
+ }
+
+ return ret < 0 ? ret : npages;
+}
+
+/*
+ * Parse initiator WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and set wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_initiator_pfault_handler(
+ struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
+ void **wqe, void **wqe_end, int wqe_length)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
+ u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+ unsigned ds, opcode;
+#if defined(DEBUG)
+ u32 ctrl_wqe_index, ctrl_qpn;
+#endif
+
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
+ mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
+ ds, wqe_length);
+ return -EFAULT;
+ }
+
+ if (ds == 0) {
+ mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
+ wqe_index, qp->mqp.qpn);
+ return -EFAULT;
+ }
+
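+	/*
+	 * In DEBUG builds, cross-check the WQE's own index and QP number
+	 * against the values reported in the page-fault event.
+	 */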
+#if defined(DEBUG)
+ ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
+ MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
+ MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
+ if (wqe_index != ctrl_wqe_index) {
+ mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
+ wqe_index, qp->mqp.qpn,
+ ctrl_wqe_index);
+ return -EFAULT;
+ }
+
+ ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
+ MLX5_WQE_CTRL_QPN_SHIFT;
+ if (qp->mqp.qpn != ctrl_qpn) {
+ mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
+ wqe_index, qp->mqp.qpn,
+ ctrl_qpn);
+ return -EFAULT;
+ }
+#endif /* DEBUG */
+
+ *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
+ *wqe += sizeof(*ctrl);
+
+ opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
+ MLX5_WQE_CTRL_OPCODE_MASK;
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ switch (opcode) {
+ case MLX5_OPCODE_SEND:
+ case MLX5_OPCODE_SEND_IMM:
+ case MLX5_OPCODE_SEND_INVAL:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_SEND))
+ goto invalid_transport_or_opcode;
+ break;
+ case MLX5_OPCODE_RDMA_WRITE:
+ case MLX5_OPCODE_RDMA_WRITE_IMM:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_WRITE))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ break;
+ case MLX5_OPCODE_RDMA_READ:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_READ))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ break;
+ default:
+ goto invalid_transport_or_opcode;
+ }
+ break;
+ case IB_QPT_UD:
+ switch (opcode) {
+ case MLX5_OPCODE_SEND:
+ case MLX5_OPCODE_SEND_IMM:
+ if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
+ IB_ODP_SUPPORT_SEND))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_datagram_seg);
+ break;
+ default:
+ goto invalid_transport_or_opcode;
+ }
+ break;
+ default:
+invalid_transport_or_opcode:
+ mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
+ qp->ibqp.qp_type, opcode);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Parse responder WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and set wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_responder_pfault_handler(
+ struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
+ void **wqe, void **wqe_end, int wqe_length)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ struct mlx5_ib_wq *wq = &qp->rq;
+ int wqe_size = 1 << wq->wqe_shift;
+
+ if (qp->ibqp.srq) {
+ mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
+ return -EFAULT;
+ }
+
+ if (qp->wq_sig) {
+ mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
+ return -EFAULT;
+ }
+
+ if (wqe_size > wqe_length) {
+ mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
+ return -EFAULT;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_RECV))
+ goto invalid_transport_or_opcode;
+ break;
+ default:
+invalid_transport_or_opcode:
+ mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
+ qp->ibqp.qp_type);
+ return -EFAULT;
+ }
+
+ *wqe_end = *wqe + wqe_size;
+
+ return 0;
+}
+
+static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ int ret;
+ void *wqe, *wqe_end;
+ u32 bytes_mapped, total_wqe_bytes;
+ char *buffer = NULL;
+ int resume_with_error = 0;
+ u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+ int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
+
+ buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!buffer) {
+ mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
+ PAGE_SIZE);
+ if (ret < 0) {
+ mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
+ -ret, wqe_index, qp->mqp.qpn);
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ wqe = buffer;
+ if (requestor)
+ ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
+ &wqe_end, ret);
+ else
+ ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
+ &wqe_end, ret);
+ if (ret < 0) {
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ if (wqe >= wqe_end) {
+ mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
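+	/*
+	 * A partial mapping (fewer bytes than the WQE needs) is treated
+	 * as an error here, since resuming the QP would only fault again.
+	 */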
+ ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
+ &total_wqe_bytes, !requestor);
+ if (ret == -EAGAIN) {
+ goto resolve_page_fault;
+ } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
+ mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
+ -ret);
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+resolve_page_fault:
+ mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
+ qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);
+
+ free_page((unsigned long)buffer);
+}
+
+static int pages_in_range(u64 address, u32 length)
+{
+ return (ALIGN(address + length, PAGE_SIZE) -
+ (address & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
+static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ struct mlx5_pagefault *mpfault = &pfault->mpfault;
+ u64 address;
+ u32 length;
+ u32 prefetch_len = mpfault->bytes_committed;
+ int prefetch_activated = 0;
+ u32 rkey = mpfault->rdma.r_key;
+ int ret;
+
+ /* The RDMA responder handler resolves the page fault in two parts.
+ * First it brings in the pages needed for the current packet
+ * (using the pfault context), and then (after resuming the QP)
+ * prefetches more pages. The second operation cannot use the pfault
+ * context and therefore uses the dummy_pfault context allocated on
+ * the stack. */
+ struct mlx5_ib_pfault dummy_pfault = {};
+
+ dummy_pfault.mpfault.bytes_committed = 0;
+
+ mpfault->rdma.rdma_va += mpfault->bytes_committed;
+ mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
+ mpfault->rdma.rdma_op_len);
+ mpfault->bytes_committed = 0;
+
+ address = mpfault->rdma.rdma_va;
+ length = mpfault->rdma.rdma_op_len;
+
+ /* For some operations, the hardware cannot tell the exact message
+ * length, and in those cases it reports zero. Use prefetch
+ * logic. */
+ if (length == 0) {
+ prefetch_activated = 1;
+ length = mpfault->rdma.packet_size;
+ prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
+ }
+
+ ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
+ NULL);
+ if (ret == -EAGAIN) {
+ /* We're racing with an invalidation, don't prefetch */
+ prefetch_activated = 0;
+ } else if (ret < 0 || pages_in_range(address, length) > ret) {
+ mlx5_ib_page_fault_resume(qp, pfault, 1);
+ return;
+ }
+
+ mlx5_ib_page_fault_resume(qp, pfault, 0);
+
+ /* At this point, there might be a new pagefault already arriving in
+ * the eq, so switch to the dummy pagefault for the rest of the
+ * processing. We're still OK with the objects being alive as the
+ * work-queue is being fenced. */
+
+ if (prefetch_activated) {
+ ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
+ address,
+ prefetch_len,
+ NULL);
+ if (ret < 0) {
+ pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
+ ret, prefetch_activated,
+ qp->ibqp.qp_num, address, prefetch_len);
+ }
+ }
+}
+
+void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ u8 event_subtype = pfault->mpfault.event_subtype;
+
+ switch (event_subtype) {
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
+ break;
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
+ break;
+ default:
+ pr_warn("Invalid page fault event subtype: 0x%x\n",
+ event_subtype);
+ mlx5_ib_page_fault_resume(qp, pfault, 1);
+ break;
+ }
+}
+
+static void mlx5_ib_qp_pfault_action(struct work_struct *work)
+{
+ struct mlx5_ib_pfault *pfault = container_of(work,
+ struct mlx5_ib_pfault,
+ work);
+ enum mlx5_ib_pagefault_context context =
+ mlx5_ib_get_pagefault_context(&pfault->mpfault);
+ struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
+ pagefaults[context]);
+ mlx5_ib_mr_pfault_handler(qp, pfault);
+}
+
+void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
+ qp->disable_page_faults = 1;
+ spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
+
+ /*
+ * Note that at this point, we are guaranteed that no more
+ * work queue elements referencing the QP we are closing will
+ * be posted to the work queue.
+ */
+ flush_workqueue(mlx5_ib_page_fault_wq);
+}
+
+void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
+ qp->disable_page_faults = 0;
+ spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
+}
+
+static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
+ struct mlx5_pagefault *pfault)
+{
+ /*
+ * Note that we will only get one fault event per QP per context
+ * (responder/initiator, read/write), until we resolve the page fault
+ * with the mlx5_ib_page_fault_resume command. Since this function is
+ * called from within the work element, there is no risk of missing
+ * events.
+ */
+ struct mlx5_ib_qp *mibqp = to_mibqp(qp);
+ enum mlx5_ib_pagefault_context context =
+ mlx5_ib_get_pagefault_context(pfault);
+ struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];
+
+ qp_pfault->mpfault = *pfault;
+
+ /* No need to disable interrupts here, since we are already in interrupt context */
+ spin_lock(&mibqp->disable_page_faults_lock);
+ if (!mibqp->disable_page_faults)
+ queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
+ spin_unlock(&mibqp->disable_page_faults_lock);
+}
+
+void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
+{
+ int i;
+
+ qp->disable_page_faults = 1;
+ spin_lock_init(&qp->disable_page_faults_lock);
+
+ qp->mqp.pfault_handler = mlx5_ib_pfault_handler;
+
+ for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
+ INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
+}
+
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
+{
+ int ret;
+
+ ret = init_srcu_struct(&ibdev->mr_srcu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
+{
+ cleanup_srcu_struct(&ibdev->mr_srcu);
+}
+
+int __init mlx5_ib_odp_init(void)
+{
+ mlx5_ib_page_fault_wq =
+ create_singlethread_workqueue("mlx5_ib_page_faults");
+ if (!mlx5_ib_page_fault_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5_ib_odp_cleanup(void)
+{
+ destroy_workqueue(mlx5_ib_page_fault_wq);
+}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 1cae1c7132b4..be0cd358b080 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -70,15 +70,6 @@ static const u32 mlx5_ib_opcode[] = {
[MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
};
-struct umr_wr {
- u64 virt_addr;
- struct ib_pd *pd;
- unsigned int page_shift;
- unsigned int npages;
- u32 length;
- int access_flags;
- u32 mkey;
-};
static int is_qp0(enum ib_qp_type qp_type)
{
@@ -110,6 +101,77 @@ void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
+/**
+ * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
+ *
+ * @qp: QP to copy from.
+ * @send: copy from the send queue when non-zero, use the receive queue
+ * otherwise.
+ * @wqe_index: index to start copying from. For send work queues, the
+ * wqe_index is in units of MLX5_SEND_WQE_BB.
+ * For receive work queues, it is the index of the work
+ * queue element in the queue.
+ * @buffer: destination buffer.
+ * @length: maximum number of bytes to copy.
+ *
+ * Copies at least a single WQE, but may copy more data.
+ *
+ * Return: the number of bytes copied, or an error code.
+ */
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+ void *buffer, u32 length)
+{
+ struct ib_device *ibdev = qp->ibqp.device;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
+ size_t offset;
+ size_t wq_end;
+ struct ib_umem *umem = qp->umem;
+ u32 first_copy_length;
+ int wqe_length;
+ int ret;
+
+ if (wq->wqe_cnt == 0) {
+ mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
+ qp->ibqp.qp_type);
+ return -EINVAL;
+ }
+
+ offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
+ wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
+
+ if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
+ return -EINVAL;
+
+ if (offset > umem->length ||
+ (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
+ return -EINVAL;
+
+ first_copy_length = min_t(u32, offset + length, wq_end) - offset;
+ ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
+ if (ret)
+ return ret;
+
+ if (send) {
+ struct mlx5_wqe_ctrl_seg *ctrl = buffer;
+ int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+ } else {
+ wqe_length = 1 << wq->wqe_shift;
+ }
+
+ if (wqe_length <= first_copy_length)
+ return first_copy_length;
+
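+	/* The WQE wraps past the end of the queue buffer; copy the
+	 * remainder from the beginning of the work queue. */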
+ ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
+ wqe_length - first_copy_length);
+ if (ret)
+ return ret;
+
+ return wqe_length;
+}
+
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
@@ -814,6 +876,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
int inlen = sizeof(*in);
int err;
+ mlx5_ib_odp_create_qp(qp);
+
gen = &dev->mdev->caps.gen;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
@@ -1098,11 +1162,13 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return;
- if (qp->state != IB_QPS_RESET)
+ if (qp->state != IB_QPS_RESET) {
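+		/* Flush any page-fault work still referencing this QP
+		 * before moving it to reset. */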
+ mlx5_ib_qp_disable_pagefaults(qp);
if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
qp->mqp.qpn);
+ }
get_cqs(qp, &send_cq, &recv_cq);
@@ -1650,6 +1716,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (mlx5_st < 0)
goto out;
+ /* If moving to a reset or error state, we must disable page faults on
+ * this QP and flush all current page faults. Otherwise a stale page
+ * fault may attempt to work on this QP after it is reset and moved
+ * again to RTS, and may cause the driver and the device to get out of
+ * sync. */
+ if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
+ (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
+ mlx5_ib_qp_disable_pagefaults(qp);
+
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
in->optparam = cpu_to_be32(optpar);
@@ -1659,6 +1734,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (err)
goto out;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ mlx5_ib_qp_enable_pagefaults(qp);
+
qp->state = new_state;
if (attr_mask & IB_QP_ACCESS_FLAGS)
@@ -1848,37 +1926,70 @@ static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
umr->mkey_mask = frwr_mkey_mask();
}
+static __be64 get_umr_reg_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_PD |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_A |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_unreg_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
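+/*
+ * An MTT-only update must leave the rest of the mkey context intact,
+ * so the mask allows changing nothing but the free bit.
+ */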
+static __be64 get_umr_update_mtt_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr)
{
- struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
- u64 mask;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
memset(umr, 0, sizeof(*umr));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
+ else
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+
if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
- umr->flags = 1 << 5; /* fail if not free */
umr->klm_octowords = get_klm_octo(umrwr->npages);
- mask = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_PD |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
- MLX5_MKEY_MASK_FREE;
- umr->mkey_mask = cpu_to_be64(mask);
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
+ umr->mkey_mask = get_umr_update_mtt_mask();
+ umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
+ umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+ } else {
+ umr->mkey_mask = get_umr_reg_mr_mask();
+ }
} else {
- umr->flags = 2 << 5; /* fail if free */
- mask = MLX5_MKEY_MASK_FREE;
- umr->mkey_mask = cpu_to_be64(mask);
+ umr->mkey_mask = get_umr_unreg_mr_mask();
}
if (!wr->num_sge)
- umr->flags |= (1 << 7); /* inline */
+ umr->flags |= MLX5_UMR_INLINE;
}
static u8 get_umr_flags(int acc)
@@ -1895,7 +2006,7 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
{
memset(seg, 0, sizeof(*seg));
if (li) {
- seg->status = 1 << 6;
+ seg->status = MLX5_MKEY_STATUS_FREE;
return;
}
@@ -1912,19 +2023,23 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+
memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
- seg->status = 1 << 6;
+ seg->status = MLX5_MKEY_STATUS_FREE;
return;
}
- seg->flags = convert_access(wr->wr.fast_reg.access_flags);
- seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
- seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
- seg->len = cpu_to_be64(wr->wr.fast_reg.length);
- seg->log2_page_size = wr->wr.fast_reg.page_shift;
+ seg->flags = convert_access(umrwr->access_flags);
+ if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
+ }
+ seg->len = cpu_to_be64(umrwr->length);
+ seg->log2_page_size = umrwr->page_shift;
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
- mlx5_mkey_variant(wr->wr.fast_reg.rkey));
+ mlx5_mkey_variant(umrwr->mkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
@@ -2927,6 +3042,14 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
int mlx5_state;
int err = 0;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /*
+ * Wait for any outstanding page faults, in case the user frees memory
+ * based upon this query's result.
+ */
+ flush_workqueue(mlx5_ib_page_fault_wq);
+#endif
+
mutex_lock(&qp->mutex);
outb = kzalloc(sizeof(*outb), GFP_KERNEL);
if (!outb) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index fef067c959fc..c0d0296e7a00 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2341,9 +2341,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
" offset = %u, page size = %u.\n",
(unsigned long int)start, (unsigned long int)virt, (u32)length,
- region->offset, region->page_size);
+ ib_umem_offset(region), region->page_size);
- skip_pages = ((u32)region->offset) >> 12;
+ skip_pages = ((u32)ib_umem_offset(region)) >> 12;
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
ib_umem_release(region);
@@ -2408,7 +2408,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
region_length -= skip_pages << 12;
for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
skip_pages = 0;
- if ((page_count != 0) && (page_count<<12)-(region->offset&(4096-1)) >= region->length)
+ if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
goto enough_pages;
if ((page_count&0x01FF) == 0) {
if (page_count >= 1024 * 512) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index ac02ce4e8040..f3cc8c9e65ae 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -96,7 +96,6 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
union ib_gid sgid;
- u8 zmac[ETH_ALEN];
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
@@ -118,9 +117,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
goto av_conf_err;
}
- memset(&zmac, 0, ETH_ALEN);
- if (pd->uctx &&
- memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+ if (pd->uctx) {
status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
attr->dmac, &attr->vlan_id);
if (status) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 4c68305ee781..fb8d8c4dfbb9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -805,7 +805,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
goto umem_err;
mr->hwmr.pbe_size = mr->umem->page_size;
- mr->hwmr.fbo = mr->umem->offset;
+ mr->hwmr.fbo = ib_umem_offset(mr->umem);
mr->hwmr.va = usr_addr;
mr->hwmr.len = len;
mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
@@ -1410,6 +1410,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
mutex_unlock(&dev->dev_lock);
if (status)
goto mbx_err;
+ if (qp->qp_type == IB_QPT_UD)
+ qp_attr->qkey = params.qkey;
qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
qp_attr->path_mtu =
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 9bbb55347cc1..a77fb4fb14e4 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -258,7 +258,7 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.user_base = start;
mr->mr.iova = virt_addr;
mr->mr.length = length;
- mr->mr.offset = umem->offset;
+ mr->mr.offset = ib_umem_offset(umem);
mr->mr.access_flags = mr_access_flags;
mr->umem = umem;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index d7562beb5423..8ba80a6d3a46 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -98,9 +98,15 @@ enum {
IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
IPOIB_MCAST_FLAG_SENDONLY = 1,
- IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
+ /*
+ * For IPOIB_MCAST_FLAG_BUSY:
+ * When set, a join is in flight and mcast->mc is unreliable.
+ * When clear and IS_ERR_OR_NULL(mcast->mc), the join needs to be
+ * restarted or hasn't started yet.
+ * When clear and mcast->mc is a valid pointer, the join succeeded.
+ */
+ IPOIB_MCAST_FLAG_BUSY = 2,
IPOIB_MCAST_FLAG_ATTACHED = 3,
- IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -317,6 +323,7 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
+ struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
struct work_struct flush_light;
@@ -477,10 +484,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
-int ipoib_ib_dev_open(struct net_device *dev, int flush);
+int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev, int flush);
-int ipoib_ib_dev_stop(struct net_device *dev, int flush);
+int ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_stop(struct net_device *dev);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -492,7 +499,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
+int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 933efcea0d03..56959adb6c7d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
}
spin_lock_irq(&priv->lock);
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to passive ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
}
return;
}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
- queue_work(ipoib_workqueue, &priv->cm.start_task);
+ queue_work(priv->wq, &priv->cm.start_task);
return tx;
}
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
- queue_work(ipoib_workqueue, &priv->cm.skb_task);
+ queue_work(priv->wq, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
}
if (!list_empty(&priv->cm.passive_ids))
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 72626c348174..fe65abb5150c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
__ipoib_reap_ah(dev);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
drain_tx_cq((struct net_device *)ctx);
}
-int ipoib_ib_dev_open(struct net_device *dev, int flush)
+int ipoib_ib_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev, flush);
+ ipoib_ib_dev_stop(dev);
return -1;
}
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
return ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev, int flush)
+int ipoib_ib_dev_down(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
- ipoib_mcast_stop_thread(dev, flush);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
local_bh_enable();
}
-int ipoib_ib_dev_stop(struct net_device *dev, int flush)
+int ipoib_ib_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
@@ -880,8 +880,7 @@ timeout:
/* Wait for all AHs to be reaped */
set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
- if (flush)
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
begin = jiffies;
@@ -918,7 +917,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev);
if (dev->flags & IFF_UP) {
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
ipoib_transport_dev_cleanup(dev);
return -ENODEV;
}
@@ -1040,12 +1039,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level >= IPOIB_FLUSH_NORMAL)
- ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- ipoib_ib_dev_stop(dev, 0);
- if (ipoib_ib_dev_open(dev, 0) != 0)
+ ipoib_ib_dev_stop(dev);
+ if (ipoib_ib_dev_open(dev) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1097,7 +1096,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
*/
ipoib_flush_paths(dev);
- ipoib_mcast_stop_thread(dev, 1);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_transport_dev_cleanup(dev);
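With the move to priv->wq, the old int flush parameters disappear: they existed so callers already running on the global workqueue could skip flush_workqueue() and avoid deadlocking on themselves. A per-device queue makes the flush unconditionally safe. The stop pattern for the self-rearming AH reaper, as a condensed sketch (names mirror the driver, but this is illustrative, not the patch itself):

	#include <linux/workqueue.h>
	#include <linux/bitops.h>
	#include <linux/jiffies.h>

	static void ah_reap_work(struct work_struct *work)
	{
		struct ipoib_dev_priv *priv =
			container_of(work, struct ipoib_dev_priv,
				     ah_reap_task.work);

		/* ... reap one round of stale address handles ... */

		/* re-arm only while the stop bit is clear */
		if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
			queue_delayed_work(priv->wq, &priv->ah_reap_task,
					   round_jiffies_relative(HZ));
	}

	static void ah_reap_stop(struct ipoib_dev_priv *priv)
	{
		set_bit(IPOIB_STOP_REAPER, &priv->flags); /* forbid re-arming */
		cancel_delayed_work(&priv->ah_reap_task); /* drop pending timer */
		flush_workqueue(priv->wq); /* wait out a running instance */
	}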
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 58b5aa3b6f2d..6bad17d4d588 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
return 0;
err_stop:
- ipoib_ib_dev_stop(dev, 1);
+ ipoib_ib_dev_stop(dev);
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
netif_stop_queue(dev);
- ipoib_ib_dev_down(dev, 1);
- ipoib_ib_dev_stop(dev, 0);
+ ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_stop(dev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
return;
}
- queue_work(ipoib_workqueue, &priv->restart_task);
+ queue_work(priv->wq, &priv->restart_task);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
}
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
/* start garbage collection */
clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
return 0;
@@ -1262,15 +1262,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- if (ipoib_neigh_hash_init(priv) < 0)
- goto out;
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, ipoib_recvq_size);
- goto out_neigh_hash_cleanup;
+ goto out;
}
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1285,16 +1283,24 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_init(dev, ca, port))
goto out_tx_ring_cleanup;
+ /*
+ * Must be after ipoib_ib_dev_init() so we can allocate a
+ * per-device wq there and use it here
+ */
+ if (ipoib_neigh_hash_init(priv) < 0)
+ goto out_dev_uninit;
+
return 0;
+out_dev_uninit:
+ ipoib_ib_dev_cleanup(dev);
+
out_tx_ring_cleanup:
vfree(priv->tx_ring);
out_rx_ring_cleanup:
kfree(priv->rx_ring);
-out_neigh_hash_cleanup:
- ipoib_neigh_hash_uninit(dev);
out:
return -ENOMEM;
}
@@ -1317,6 +1323,12 @@ void ipoib_dev_cleanup(struct net_device *dev)
}
unregister_netdevice_many(&head);
+ /*
+ * Must be before ipoib_ib_dev_cleanup() or we delete an
+ * in-use workqueue
+ */
+ ipoib_neigh_hash_uninit(dev);
+
ipoib_ib_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1324,8 +1336,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->rx_ring = NULL;
priv->tx_ring = NULL;
-
- ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
@@ -1636,7 +1646,7 @@ register_failed:
/* Stop GC if started before flush */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1707,7 +1717,7 @@ static void ipoib_remove_one(struct ib_device *device)
/* Stop GC */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
unregister_netdev(priv->dev);
free_netdev(priv->dev);
@@ -1748,8 +1758,13 @@ static int __init ipoib_init_module(void)
* unregister_netdev() and linkwatch_event take the rtnl lock,
* so flush_scheduled_work() can deadlock during device
* removal.
+ *
+ * In addition, bringing one device up and another down at the
+ * same time can deadlock a single workqueue, so we keep this
+ * global fallback workqueue, but we also allocate a per-device
+ * workqueue each time we bring an interface up.
+ */
- ipoib_workqueue = create_singlethread_workqueue("ipoib");
+ ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
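The reordering above is about ownership of priv->wq: the neighbour GC work queued by ipoib_neigh_hash_init() now runs on the per-device queue that ipoib_ib_dev_init() creates (via the transport init), so init must create the queue before anyone uses it, and cleanup must stop the users before destroying it. A condensed sketch of the resulting pairing (illustrative, not the full functions):

	static void ipoib_dev_cleanup_sketch(struct net_device *dev)
	{
		/* stop the users of priv->wq first ... */
		ipoib_neigh_hash_uninit(dev);	/* cancels GC work on priv->wq */
		/* ... then tear down the layer that owns the queue */
		ipoib_ib_dev_cleanup(dev);	/* flushes and destroys priv->wq */
	}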
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ffb83b5f7e80..bc50dd0d0e4d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -190,12 +190,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
spin_unlock_irq(&priv->lock);
priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
set_qkey = 1;
-
- if (!ipoib_cm_admin_enabled(dev)) {
- rtnl_lock();
- dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
- rtnl_unlock();
- }
}
if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -277,16 +271,27 @@ ipoib_mcast_sendonly_join_complete(int status,
struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev;
+ /*
+ * We have to take the mutex to force mcast_sendonly_join to
+ * return from ib_sa_join_multicast and set mcast->mc to a
+ * valid value. Otherwise we would race with ourselves: we
+ * might fail here, yet get a valid return from
+ * ib_sa_join_multicast after we had cleared mcast->mc here,
+ * resulting in mismatched joins and leaves and a deadlock.
+ */
+ mutex_lock(&mcast_mutex);
+
/* We trap for port events ourselves. */
if (status == -ENETRESET)
- return 0;
+ goto out;
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (status) {
if (mcast->logcount++ < 20)
- ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
+ ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
+ "join failed for %pI6, status %d\n",
mcast->mcmember.mgid.raw, status);
/* Flush out any queued packets */
@@ -296,11 +301,15 @@ ipoib_mcast_sendonly_join_complete(int status,
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
netif_tx_unlock_bh(dev);
-
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
- &mcast->flags);
}
+out:
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (status)
+ mcast->mc = NULL;
+ complete(&mcast->done);
+ if (status == -ENETRESET)
+ status = 0;
+ mutex_unlock(&mcast_mutex);
return status;
}
@@ -318,12 +327,14 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
int ret = 0;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
+ ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
+ "multicast joins\n");
return -ENODEV;
}
- if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
- ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
+ "sendonly join\n");
return -EBUSY;
}
@@ -331,6 +342,9 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
rec.port_gid = priv->local_gid;
rec.pkey = cpu_to_be16(priv->pkey);
+ mutex_lock(&mcast_mutex);
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
priv->port, &rec,
IB_SA_MCMEMBER_REC_MGID |
@@ -343,12 +357,14 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
if (IS_ERR(mcast->mc)) {
ret = PTR_ERR(mcast->mc);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
- ret);
+ complete(&mcast->done);
+ ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
+ "failed (ret = %d)\n", ret);
} else {
- ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
- mcast->mcmember.mgid.raw);
+ ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
+ "sendonly join\n", mcast->mcmember.mgid.raw);
}
+ mutex_unlock(&mcast_mutex);
return ret;
}
@@ -359,18 +375,29 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
carrier_on_task);
struct ib_port_attr attr;
- /*
- * Take rtnl_lock to avoid racing with ipoib_stop() and
- * turning the carrier back on while a device is being
- * removed.
- */
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
- rtnl_lock();
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed. However, ipoib_stop() will attempt to flush
+ * the workqueue while holding the rtnl lock, so loop
+ * on trylock until either we get the lock or we see
+ * FLAG_ADMIN_UP go away, as that signals that we are
+ * bailing and can safely ignore the carrier-on work.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
+ if (!ipoib_cm_admin_enabled(priv->dev))
+ dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
netif_carrier_on(priv->dev);
rtnl_unlock();
}
@@ -385,60 +412,63 @@ static int ipoib_mcast_join_complete(int status,
ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
mcast->mcmember.mgid.raw, status);
+ /*
+ * We have to take the mutex to force mcast_join to
+ * return from ib_sa_join_multicast and set mcast->mc to a
+ * valid value. Otherwise we would race with ourselves: we
+ * might fail here, yet get a valid return from
+ * ib_sa_join_multicast after we had cleared mcast->mc here,
+ * resulting in mismatched joins and leaves and a deadlock.
+ */
+ mutex_lock(&mcast_mutex);
+
/* We trap for port events ourselves. */
- if (status == -ENETRESET) {
- status = 0;
+ if (status == -ENETRESET)
goto out;
- }
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (!status) {
mcast->backoff = 1;
- mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, 0);
- mutex_unlock(&mcast_mutex);
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
/*
- * Defer carrier on work to ipoib_workqueue to avoid a
+ * Defer carrier on work to priv->wq to avoid a
* deadlock on rtnl_lock here.
*/
if (mcast == priv->broadcast)
- queue_work(ipoib_workqueue, &priv->carrier_on_task);
-
- status = 0;
- goto out;
- }
-
- if (mcast->logcount++ < 20) {
- if (status == -ETIMEDOUT || status == -EAGAIN) {
- ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
- } else {
- ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
+ queue_work(priv->wq, &priv->carrier_on_task);
+ } else {
+ if (mcast->logcount++ < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN) {
+ ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ }
}
- }
-
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-
- mutex_lock(&mcast_mutex);
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ }
+out:
spin_lock_irq(&priv->lock);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (status)
+ mcast->mc = NULL;
+ complete(&mcast->done);
+ if (status == -ENETRESET)
+ status = 0;
+ if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->mcast_task,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);
-out:
- complete(&mcast->done);
+
return status;
}
@@ -487,10 +517,9 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
}
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ mutex_lock(&mcast_mutex);
init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
-
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
@@ -504,13 +533,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
- mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task,
+ queue_delayed_work(priv->wq, &priv->mcast_task,
mcast->backoff * HZ);
- mutex_unlock(&mcast_mutex);
}
+ mutex_unlock(&mcast_mutex);
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -547,8 +574,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
ipoib_warn(priv, "failed to allocate broadcast group\n");
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, HZ);
+ queue_delayed_work(priv->wq, &priv->mcast_task,
+ HZ);
mutex_unlock(&mcast_mutex);
return;
}
@@ -563,7 +590,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
}
if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
- if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+ if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
ipoib_mcast_join(dev, priv->broadcast, 0);
return;
}
@@ -571,23 +599,33 @@ void ipoib_mcast_join_task(struct work_struct *work)
while (1) {
struct ipoib_mcast *mcast = NULL;
+ /*
+ * We need the mutex so our flags are consistent, and the
+ * priv->lock so we don't race with list removals in either
+ * mcast_dev_flush or mcast_restart_task
+ */
+ mutex_lock(&mcast_mutex);
spin_lock_irq(&priv->lock);
list_for_each_entry(mcast, &priv->multicast_list, list) {
- if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ if (IS_ERR_OR_NULL(mcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
+ !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
/* Found the next unjoined group */
break;
}
}
spin_unlock_irq(&priv->lock);
+ mutex_unlock(&mcast_mutex);
if (&mcast->list == &priv->multicast_list) {
/* All done */
break;
}
- ipoib_mcast_join(dev, mcast, 1);
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ ipoib_mcast_sendonly_join(mcast);
+ else
+ ipoib_mcast_join(dev, mcast, 1);
return;
}
@@ -604,13 +642,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
+int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -621,8 +659,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
cancel_delayed_work(&priv->mcast_task);
mutex_unlock(&mcast_mutex);
- if (flush)
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
return 0;
}
@@ -633,6 +670,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
+
+ if (!IS_ERR_OR_NULL(mcast->mc))
ib_sa_free_multicast(mcast->mc);
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -685,6 +725,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
+ if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
}
if (!mcast->ah) {
@@ -698,8 +740,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
ipoib_dbg_mcast(priv, "no address vector, "
"but multicast join already started\n");
- else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- ipoib_mcast_sendonly_join(mcast);
/*
* If lookup completes between here and out:, don't
@@ -759,9 +799,12 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* seperate between the wait to the leave*/
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
wait_for_completion(&mcast->done);
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -794,8 +837,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- ipoib_mcast_stop_thread(dev, 0);
-
local_irq_save(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);
@@ -880,14 +921,38 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /* We have to cancel outside of the spinlock */
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
+ /*
+ * We have to cancel outside of the spinlock, but we have to
+ * take the rtnl lock or else we race with the removal of
+ * entries from the remove list in mcast_dev_flush as part
+ * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
+ * to signal that we have hit this particular race, and we
+ * return since we know we don't need to do anything else
+ * anyway.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
-
- if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- ipoib_mcast_start_thread(dev);
+ /*
+ * Restart our join task if needed
+ */
+ ipoib_mcast_start_thread(dev);
+ rtnl_unlock();
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
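The two trylock loops above share one idea: ipoib_stop() flushes priv->wq while holding the RTNL, so a work item that blocked in rtnl_lock() could deadlock against its own flush. Spinning on rtnl_trylock() and bailing once ADMIN_UP drops breaks that cycle. As a stand-alone sketch (rtnl_lock_or_bail() is a hypothetical helper name):

	#include <linux/rtnetlink.h>
	#include <linux/bitops.h>
	#include <linux/delay.h>

	/* Returns true with the RTNL held; false if the device went
	 * administratively down while we were waiting, in which case
	 * the caller simply abandons its work item. */
	static bool rtnl_lock_or_bail(struct ipoib_dev_priv *priv)
	{
		while (!rtnl_trylock()) {
			if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
				return false;	/* ipoib_stop() is tearing us down */
			msleep(20);		/* back off; the stop path holds RTNL */
		}
		return true;
	}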
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index c56d5d44c53b..b72a753eb41d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,10 +145,20 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
int ret, size;
int i;
+ /*
+ * The various IPoIB tasks assume they will never race against
+ * themselves, so always use a single-threaded workqueue
+ */
+ priv->wq = create_singlethread_workqueue("ipoib_wq");
+ if (!priv->wq) {
+ printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
+ return -ENODEV;
+ }
+
priv->pd = ib_alloc_pd(priv->ca);
if (IS_ERR(priv->pd)) {
printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
- return -ENODEV;
+ goto out_free_wq;
}
priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -242,6 +252,10 @@ out_free_mr:
out_free_pd:
ib_dealloc_pd(priv->pd);
+
+out_free_wq:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
return -ENODEV;
}
@@ -270,6 +284,12 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
if (ib_dealloc_pd(priv->pd))
ipoib_warn(priv, "ib_dealloc_pd failed\n");
+
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
}
void ipoib_event(struct ib_event_handler *handler,
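Taken together, the two hunks above give priv->wq a clear lifecycle inside the transport layer. A condensed sketch of that pairing (hypothetical helper names; assumes the wq field added to ipoib_dev_priv in the header hunk earlier):

	#include <linux/workqueue.h>

	static int ipoib_wq_create(struct ipoib_dev_priv *priv)
	{
		/* single-threaded: the IPoIB tasks assume they never
		 * run concurrently with themselves */
		priv->wq = create_singlethread_workqueue("ipoib_wq");
		return priv->wq ? 0 : -ENOMEM;
	}

	static void ipoib_wq_destroy(struct ipoib_dev_priv *priv)
	{
		if (priv->wq) {
			flush_workqueue(priv->wq);	/* drain anything queued */
			destroy_workqueue(priv->wq);
			priv->wq = NULL;		/* guard against reuse */
		}
	}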
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 20ca6a619476..6a594aac2290 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,7 +97,7 @@ module_param_named(pi_enable, iser_pi_enable, bool, 0644);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
module_param_named(pi_guard, iser_pi_guard, int, 0644);
-MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:IP_CSUM)");
+MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
static struct workqueue_struct *release_wq;
struct iser_global ig;
@@ -164,18 +164,42 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
return 0;
}
-int iser_initialize_task_headers(struct iscsi_task *task,
- struct iser_tx_desc *tx_desc)
+/**
+ * iser_initialize_task_headers() - Initialize task headers
+ * @task: iscsi task
+ * @tx_desc: iser tx descriptor
+ *
+ * Notes:
+ * This routine may race with the iser teardown flow for SCSI
+ * error-handling TMFs. So for TMFs we acquire the state mutex
+ * to avoid dereferencing the IB device, which may have already
+ * been terminated.
+ */
+int
+iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
{
- struct iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
+ const bool mgmt_task = !task->sc && !in_interrupt();
+ int ret = 0;
+
+ if (unlikely(mgmt_task))
+ mutex_lock(&iser_conn->state_mutex);
+
+ if (unlikely(iser_conn->state != ISER_CONN_UP)) {
+ ret = -ENODEV;
+ goto out;
+ }
dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
- if (ib_dma_mapping_error(device->ib_device, dma_addr))
- return -ENOMEM;
+ if (ib_dma_mapping_error(device->ib_device, dma_addr)) {
+ ret = -ENOMEM;
+ goto out;
+ }
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
@@ -183,7 +207,11 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_task->iser_conn = iser_conn;
- return 0;
+out:
+ if (unlikely(mgmt_task))
+ mutex_unlock(&iser_conn->state_mutex);
+
+ return ret;
}
/**
@@ -199,9 +227,14 @@ static int
iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ int ret;
- if (iser_initialize_task_headers(task, &iser_task->desc))
- return -ENOMEM;
+ ret = iser_initialize_task_headers(task, &iser_task->desc);
+ if (ret) {
+ iser_err("Failed to init task %p, err = %d\n",
+ iser_task, ret);
+ return ret;
+ }
/* mgmt task */
if (!task->sc)
@@ -508,8 +541,8 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
- iscsi_conn_stop(cls_conn, flag);
iser_conn_terminate(iser_conn);
+ iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
@@ -541,12 +574,13 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
static inline unsigned int
iser_dif_prot_caps(int prot_caps)
{
- return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? SHOST_DIF_TYPE2_PROTECTION |
- SHOST_DIX_TYPE2_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? SHOST_DIF_TYPE3_PROTECTION |
- SHOST_DIX_TYPE3_PROTECTION : 0);
+ return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ?
+ SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_2) ?
+ SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_3) ?
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0);
}
/**
@@ -569,6 +603,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
+ u16 max_cmds;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
@@ -586,26 +621,41 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/
if (ep) {
iser_conn = ep->dd_data;
+ max_cmds = iser_conn->max_cmds;
+
+ mutex_lock(&iser_conn->state_mutex);
+ if (iser_conn->state != ISER_CONN_UP) {
+ iser_err("iser conn %p already started teardown\n",
+ iser_conn);
+ mutex_unlock(&iser_conn->state_mutex);
+ goto free_host;
+ }
+
ib_conn = &iser_conn->ib_conn;
if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
- if (iser_pi_guard)
- scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
- else
- scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
}
- }
- if (iscsi_host_add(shost, ep ?
- ib_conn->device->ib_device->dma_device : NULL))
- goto free_host;
+ if (iscsi_host_add(shost,
+ ib_conn->device->ib_device->dma_device)) {
+ mutex_unlock(&iser_conn->state_mutex);
+ goto free_host;
+ }
+ mutex_unlock(&iser_conn->state_mutex);
+ } else {
+ max_cmds = ISER_DEF_XMIT_CMDS_MAX;
+ if (iscsi_host_add(shost, NULL))
+ goto free_host;
+ }
- if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
+ if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
- cmds_max, ISER_DEF_XMIT_CMDS_MAX);
- cmds_max = ISER_DEF_XMIT_CMDS_MAX;
+ cmds_max, max_cmds);
+ cmds_max = max_cmds;
}
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index cd4174ca9a76..5ce26817e7e1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,34 +69,31 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "1.4.8"
+#define DRV_VER "1.5"
#define iser_dbg(fmt, arg...) \
do { \
- if (iser_debug_level > 2) \
+ if (unlikely(iser_debug_level > 2)) \
printk(KERN_DEBUG PFX "%s: " fmt,\
__func__ , ## arg); \
} while (0)
#define iser_warn(fmt, arg...) \
do { \
- if (iser_debug_level > 0) \
+ if (unlikely(iser_debug_level > 0)) \
pr_warn(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
#define iser_info(fmt, arg...) \
do { \
- if (iser_debug_level > 1) \
+ if (unlikely(iser_debug_level > 1)) \
pr_info(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
-#define iser_err(fmt, arg...) \
- do { \
- printk(KERN_ERR PFX "%s: " fmt, \
- __func__ , ## arg); \
- } while (0)
+#define iser_err(fmt, arg...) \
+ pr_err(PFX "%s: " fmt, __func__ , ## arg)
#define SHIFT_4K 12
#define SIZE_4K (1ULL << SHIFT_4K)
@@ -144,6 +141,11 @@
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
+#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr \
+ - ISER_MAX_TX_MISC_PDUS \
+ - ISER_MAX_RX_MISC_PDUS) / \
+ (1 + ISER_INFLIGHT_DATAOUTS))
+
#define ISER_WC_BATCH_COUNT 16
#define ISER_SIGNAL_CMD_COUNT 32
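For a feel of the new ISER_GET_MAX_XMIT_CMDS() arithmetic, a stand-alone check with illustrative constants (the numbers below are assumptions for the example, not the driver's actual values):

	#include <stdio.h>

	#define MAX_TX_MISC_PDUS	15	/* assumed for illustration */
	#define MAX_RX_MISC_PDUS	15	/* assumed for illustration */
	#define INFLIGHT_DATAOUTS	 8	/* assumed for illustration */

	#define GET_MAX_XMIT_CMDS(send_wr) \
		(((send_wr) - MAX_TX_MISC_PDUS - MAX_RX_MISC_PDUS) / \
		 (1 + INFLIGHT_DATAOUTS))

	int main(void)
	{
		/* (4096 - 15 - 15) / 9 = 451 commands for a 4096-WR queue */
		printf("%d\n", GET_MAX_XMIT_CMDS(4096));
		return 0;
	}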
@@ -247,7 +249,6 @@ struct iscsi_endpoint;
* @va: MR start address (buffer va)
* @len: MR length
* @mem_h: pointer to registration context (FMR/Fastreg)
- * @is_mr: indicates weather we registered the buffer
*/
struct iser_mem_reg {
u32 lkey;
@@ -255,7 +256,6 @@ struct iser_mem_reg {
u64 va;
u64 len;
void *mem_h;
- int is_mr;
};
/**
@@ -323,8 +323,6 @@ struct iser_rx_desc {
char pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));
-#define ISER_MAX_CQ 4
-
struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;
@@ -375,7 +373,7 @@ struct iser_device {
struct list_head ig_list;
int refcount;
int comps_used;
- struct iser_comp comps[ISER_MAX_CQ];
+ struct iser_comp *comps;
int (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
unsigned cmds_max);
void (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
@@ -432,6 +430,7 @@ struct fast_reg_descriptor {
* @cma_id: rdma_cm connection manager handle
* @qp: Connection Queue-pair
* @post_recv_buf_count: post receive counter
+ * @sig_count: send work request signal count
* @rx_wr: receive work request for batch posts
* @device: reference to iser device
* @comp: iser completion context
@@ -452,6 +451,7 @@ struct ib_conn {
struct rdma_cm_id *cma_id;
struct ib_qp *qp;
int post_recv_buf_count;
+ u8 sig_count;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
struct iser_device *device;
struct iser_comp *comp;
@@ -482,6 +482,7 @@ struct ib_conn {
* to max number of post recvs
* @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
* @min_posted_rx: (qp_max_recv_dtos >> 2)
+ * @max_cmds: maximum cmds allowed for this connection
* @name: connection peer portal
* @release_work: deferred work for release job
* @state_mutex: protects iser connection state
@@ -507,6 +508,7 @@ struct iser_conn {
unsigned qp_max_recv_dtos;
unsigned qp_max_recv_dtos_mask;
unsigned min_posted_rx;
+ u16 max_cmds;
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
struct mutex state_mutex;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5a489ea63732..3821633f1065 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -369,7 +369,7 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
return 0;
}
-static inline bool iser_signal_comp(int sig_count)
+static inline bool iser_signal_comp(u8 sig_count)
{
return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}
@@ -388,7 +388,7 @@ int iser_send_command(struct iscsi_conn *conn,
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
- static unsigned sig_count;
+ u8 sig_count = ++iser_conn->ib_conn.sig_count;
edtl = ntohl(hdr->data_length);
@@ -435,7 +435,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
err = iser_post_send(&iser_conn->ib_conn, tx_desc,
- iser_signal_comp(++sig_count));
+ iser_signal_comp(sig_count));
if (!err)
return 0;
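The sig_count change fixes two things at once: the counter was a function-local static (shared, unsynchronized, across all connections) and is now per-connection state, and a u8 wraps at 256, which is a multiple of ISER_SIGNAL_CMD_COUNT (32), so the every-32nd-send cadence survives wrap-around. A sketch of the resulting decision (signal_this_send() is a hypothetical name; ISER_SIGNAL_CMD_COUNT comes from the header hunk above):

	#include <linux/types.h>

	/* Mark only every 32nd send as signalled so the CQ is not
	 * flooded with a completion per command. */
	static inline bool signal_this_send(struct ib_conn *ib_conn)
	{
		u8 sig_count = ++ib_conn->sig_count;

		return (sig_count % ISER_SIGNAL_CMD_COUNT) == 0;
	}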
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 6c5ce357fba6..abce9339333f 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -73,7 +73,6 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
if (cmd_dir == ISER_DIR_OUT) {
/* copy the unaligned sg the buffer which is used for RDMA */
- int i;
char *p, *from;
sgl = (struct scatterlist *)data->buf;
@@ -409,7 +408,6 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
regd_buf->reg.rkey = device->mr->rkey;
regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
- regd_buf->reg.is_mr = 0;
iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
"va: 0x%08lX sz: %ld]\n",
@@ -440,13 +438,13 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
return 0;
}
-static inline void
+static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
- domain->sig.dif.pi_interval = sc->device->sector_size;
- domain->sig.dif.ref_tag = scsi_get_lba(sc) & 0xffffffff;
+ domain->sig.dif.pi_interval = scsi_prot_interval(sc);
+ domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
@@ -454,8 +452,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
domain->sig.dif.apptag_check_mask = 0xffff;
domain->sig.dif.app_escape = true;
domain->sig.dif.ref_escape = true;
- if (scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE1 ||
- scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE2)
+ if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
domain->sig.dif.ref_remap = true;
};
@@ -473,26 +470,16 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
case SCSI_PROT_WRITE_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
- /*
- * At the moment we use this modparam to tell what is
- * the memory bg_type, in the future we will take it
- * from sc.
- */
- sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
- IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
+ IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
- /*
- * At the moment we use this modparam to tell what is
- * the memory bg_type, in the future we will take it
- * from sc.
- */
- sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
- IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
+ IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
default:
iser_err("Unsupported PI operation %d\n",
@@ -503,26 +490,28 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
return 0;
}
-static int
+static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
- switch (scsi_get_prot_type(sc)) {
- case SCSI_PROT_DIF_TYPE0:
- break;
- case SCSI_PROT_DIF_TYPE1:
- case SCSI_PROT_DIF_TYPE2:
- *mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
- break;
- case SCSI_PROT_DIF_TYPE3:
- *mask = ISER_CHECK_GUARD;
- break;
- default:
- iser_err("Unsupported protection type %d\n",
- scsi_get_prot_type(sc));
- return -EINVAL;
- }
+ *mask = 0;
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
+ *mask |= ISER_CHECK_REFTAG;
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
+ *mask |= ISER_CHECK_GUARD;
+}
- return 0;
+static void
+iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+{
+ u32 rkey;
+
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->ex.invalidate_rkey = mr->rkey;
+
+ rkey = ib_inc_rkey(mr->rkey);
+ ib_update_fast_reg_key(mr, rkey);
}
static int
@@ -536,26 +525,17 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct ib_send_wr *bad_wr, *wr = NULL;
struct ib_sig_attrs sig_attrs;
int ret;
- u32 key;
memset(&sig_attrs, 0, sizeof(sig_attrs));
ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
if (ret)
goto err;
- ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
- if (ret)
- goto err;
+ iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
}
memset(&sig_wr, 0, sizeof(sig_wr));
@@ -585,12 +565,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
sig_sge->lkey = pi_ctx->sig_mr->lkey;
sig_sge->addr = 0;
- sig_sge->length = data_sge->length + prot_sge->length;
- if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
- scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
- sig_sge->length += (data_sge->length /
- iser_task->sc->device->sector_size) * 8;
- }
+ sig_sge->length = scsi_transfer_length(iser_task->sc);
iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
sig_sge->addr, sig_sge->length,
@@ -613,7 +588,6 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fastreg_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
- u8 key;
int ret, offset, size, plen;
/* if there a single dma entry, dma mr suffices */
@@ -645,14 +619,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
}
if (!(desc->reg_indicators & ind)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.ex.invalidate_rkey = mr->rkey;
+ iser_inv_rkey(&inv_wr, mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
@@ -770,15 +738,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
regd_buf->reg.va = sig_sge.addr;
regd_buf->reg.len = sig_sge.length;
- regd_buf->reg.is_mr = 1;
} else {
- if (desc) {
+ if (desc)
regd_buf->reg.rkey = desc->data_mr->rkey;
- regd_buf->reg.is_mr = 1;
- } else {
+ else
regd_buf->reg.rkey = device->mr->rkey;
- regd_buf->reg.is_mr = 0;
- }
regd_buf->reg.lkey = data_sge.lkey;
regd_buf->reg.va = data_sge.addr;
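The consolidated iser_inv_rkey() depends on ib_inc_rkey() to bump the key: an rkey's low byte is the variant/key part while the upper bits index the MR, so only the low byte should change between invalidate/re-register cycles. A user-space sketch of that arithmetic (assumed to mirror the in-kernel inline):

	#include <stdint.h>

	static uint32_t inc_rkey(uint32_t rkey)
	{
		const uint32_t mask = 0x000000ff;	/* variant/key byte */

		/* bump the key byte, leave the MR index bits untouched */
		return ((rkey + 1) & mask) | (rkey & ~mask);
	}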
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 67225bb82bb5..695a2704bd43 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -76,7 +76,7 @@ static void iser_event_handler(struct ib_event_handler *handler,
static int iser_create_device_ib_res(struct iser_device *device)
{
struct ib_device_attr *dev_attr = &device->dev_attr;
- int ret, i;
+ int ret, i, max_cqe;
ret = ib_query_device(device->ib_device, dev_attr);
if (ret) {
@@ -104,11 +104,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
return -1;
}
- device->comps_used = min(ISER_MAX_CQ,
+ device->comps_used = min_t(int, num_online_cpus(),
device->ib_device->num_comp_vectors);
- iser_info("using %d CQs, device %s supports %d vectors\n",
+
+ device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
+ GFP_KERNEL);
+ if (!device->comps)
+ goto comps_err;
+
+ max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
+
+ iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
device->comps_used, device->ib_device->name,
- device->ib_device->num_comp_vectors);
+ device->ib_device->num_comp_vectors, max_cqe);
device->pd = ib_alloc_pd(device->ib_device);
if (IS_ERR(device->pd))
@@ -122,7 +130,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
iser_cq_callback,
iser_cq_event_callback,
(void *)comp,
- ISER_MAX_CQ_LEN, i);
+ max_cqe, i);
if (IS_ERR(comp->cq)) {
comp->cq = NULL;
goto cq_err;
@@ -162,6 +170,8 @@ cq_err:
}
ib_dealloc_pd(device->pd);
pd_err:
+ kfree(device->comps);
+comps_err:
iser_err("failed to allocate an IB resource\n");
return -1;
}
@@ -187,6 +197,9 @@ static void iser_free_device_ib_res(struct iser_device *device)
(void)ib_dereg_mr(device->mr);
(void)ib_dealloc_pd(device->pd);
+ kfree(device->comps);
+ device->comps = NULL;
+
device->mr = NULL;
device->pd = NULL;
}
@@ -425,7 +438,10 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
+ struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
+ ib_conn);
struct iser_device *device;
+ struct ib_device_attr *dev_attr;
struct ib_qp_init_attr init_attr;
int ret = -ENOMEM;
int index, min_index = 0;
@@ -433,6 +449,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
+ dev_attr = &device->dev_attr;
memset(&init_attr, 0, sizeof init_attr);
@@ -460,8 +477,20 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
if (ib_conn->pi_support) {
init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
} else {
- init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+ if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
+ init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
+ } else {
+ init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
+ iser_dbg("device %s supports max_send_wr %d\n",
+ device->ib_device->name, dev_attr->max_qp_wr);
+ }
}
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
@@ -475,7 +504,11 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
return ret;
out_err:
+ mutex_lock(&ig.connlist_mutex);
+ ib_conn->comp->active_qps--;
+ mutex_unlock(&ig.connlist_mutex);
iser_err("unable to alloc mem or create resource, err %d\n", ret);
+
return ret;
}
@@ -610,9 +643,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
mutex_unlock(&ig.connlist_mutex);
mutex_lock(&iser_conn->state_mutex);
- if (iser_conn->state != ISER_CONN_DOWN)
+ if (iser_conn->state != ISER_CONN_DOWN) {
iser_warn("iser conn %p state %d, expected state down.\n",
iser_conn, iser_conn->state);
+ iser_conn->state = ISER_CONN_DOWN;
+ }
/*
* In case we never got to bind stage, we still need to
* release IB resources (which is safe to call more than once).
@@ -662,8 +697,10 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
/* post an indication that all flush errors were consumed */
err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
- if (err)
+ if (err) {
iser_err("conn %p failed to post beacon", ib_conn);
+ return 1;
+ }
wait_for_completion(&ib_conn->flush_comp);
}
@@ -846,20 +883,21 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
break;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_ADDR_CHANGE:
- iser_disconnected_handler(cma_id);
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ iser_cleanup_handler(cma_id, false);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
/*
* we *must* destroy the device as we cannot rely
* on iscsid to be around to initiate error handling.
- * also implicitly destroy the cma_id.
+ * also, if we are not in state DOWN, implicitly destroy
+ * the cma_id.
*/
iser_cleanup_handler(cma_id, true);
- iser_conn->ib_conn.cma_id = NULL;
- ret = 1;
- break;
- case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- iser_cleanup_handler(cma_id, false);
+ if (iser_conn->state != ISER_CONN_DOWN) {
+ iser_conn->ib_conn.cma_id = NULL;
+ ret = 1;
+ }
break;
default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -981,7 +1019,6 @@ int iser_reg_page_vec(struct ib_conn *ib_conn,
mem_reg->rkey = mem->fmr->rkey;
mem_reg->len = page_vec->length * SIZE_4K;
mem_reg->va = io_addr;
- mem_reg->is_mr = 1;
mem_reg->mem_h = (void *)mem;
mem_reg->va += page_vec->offset;
@@ -1008,7 +1045,7 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
int ret;
- if (!reg->is_mr)
+ if (!reg->mem_h)
return;
iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
@@ -1028,11 +1065,10 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct fast_reg_descriptor *desc = reg->mem_h;
- if (!reg->is_mr)
+ if (!desc)
return;
reg->mem_h = NULL;
- reg->is_mr = 0;
spin_lock_bh(&ib_conn->lock);
list_add_tail(&desc->list, &ib_conn->fastreg.pool);
spin_unlock_bh(&ib_conn->lock);
@@ -1049,7 +1085,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = ib_conn->device->mr->lkey;
- rx_wr.wr_id = (unsigned long)iser_conn->login_resp_buf;
+ rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
@@ -1073,7 +1109,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &iser_conn->rx_descs[my_rx_head];
- rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
@@ -1110,7 +1146,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
DMA_TO_DEVICE);
send_wr.next = NULL;
- send_wr.wr_id = (unsigned long)tx_desc;
+ send_wr.wr_id = (uintptr_t)tx_desc;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
@@ -1160,6 +1196,7 @@ static void
iser_handle_comp_error(struct ib_conn *ib_conn,
struct ib_wc *wc)
{
+ void *wr_id = (void *)(uintptr_t)wc->wr_id;
struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
ib_conn);
@@ -1168,8 +1205,8 @@ iser_handle_comp_error(struct ib_conn *ib_conn,
iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
- if (is_iser_tx_desc(iser_conn, (void *)wc->wr_id)) {
- struct iser_tx_desc *desc = (struct iser_tx_desc *)wc->wr_id;
+ if (is_iser_tx_desc(iser_conn, wr_id)) {
+ struct iser_tx_desc *desc = wr_id;
if (desc->type == ISCSI_TX_DATAOUT)
kmem_cache_free(ig.desc_cache, desc);
@@ -1193,14 +1230,14 @@ static void iser_handle_wc(struct ib_wc *wc)
struct iser_rx_desc *rx_desc;
ib_conn = wc->qp->qp_context;
- if (wc->status == IB_WC_SUCCESS) {
+ if (likely(wc->status == IB_WC_SUCCESS)) {
if (wc->opcode == IB_WC_RECV) {
- rx_desc = (struct iser_rx_desc *)wc->wr_id;
+ rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
iser_rcv_completion(rx_desc, wc->byte_len,
ib_conn);
} else
if (wc->opcode == IB_WC_SEND) {
- tx_desc = (struct iser_tx_desc *)wc->wr_id;
+ tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
iser_snd_completion(tx_desc, ib_conn);
} else {
iser_err("Unknown wc opcode %d\n", wc->opcode);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 10641b7816f4..dafb3c531f96 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,7 +22,6 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
-#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
@@ -36,11 +35,17 @@
#define ISERT_MAX_CONN 8
#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
+#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
+ ISERT_MAX_CONN)
+
+int isert_debug_level = 0;
+module_param_named(debug_level, isert_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
-static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
@@ -54,19 +59,32 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
+static inline bool
+isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+{
+ return (conn->pi_support &&
+ cmd->prot_op != TARGET_PROT_NORMAL);
+}
+
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
struct isert_conn *isert_conn = (struct isert_conn *)context;
- pr_err("isert_qp_event_callback event: %d\n", e->event);
+ isert_err("conn %p event: %d\n", isert_conn, e->event);
switch (e->event) {
case IB_EVENT_COMM_EST:
rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
- pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
+ isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
break;
default:
break;
@@ -80,39 +98,41 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
ret = ib_query_device(ib_dev, devattr);
if (ret) {
- pr_err("ib_query_device() failed: %d\n", ret);
+ isert_err("ib_query_device() failed: %d\n", ret);
return ret;
}
- pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
- pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
+ isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
+ isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
return 0;
}
static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
- u8 protection)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
struct isert_device *device = isert_conn->conn_device;
struct ib_qp_init_attr attr;
- int ret, index, min_index = 0;
+ struct isert_comp *comp;
+ int ret, i, min = 0;
mutex_lock(&device_list_mutex);
- for (index = 0; index < device->cqs_used; index++)
- if (device->cq_active_qps[index] <
- device->cq_active_qps[min_index])
- min_index = index;
- device->cq_active_qps[min_index]++;
- pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
+ for (i = 0; i < device->comps_used; i++)
+ if (device->comps[i].active_qps <
+ device->comps[min].active_qps)
+ min = i;
+ comp = &device->comps[min];
+ comp->active_qps++;
+ isert_info("conn %p, using comp %p min_index: %d\n",
+ isert_conn, comp, min);
mutex_unlock(&device_list_mutex);
memset(&attr, 0, sizeof(struct ib_qp_init_attr));
attr.event_handler = isert_qp_event_callback;
attr.qp_context = isert_conn;
- attr.send_cq = device->dev_tx_cq[min_index];
- attr.recv_cq = device->dev_rx_cq[min_index];
+ attr.send_cq = comp->cq;
+ attr.recv_cq = comp->cq;
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
- attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+ attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
/*
* FIXME: Use devattr.max_sge - 2 for max_send_sge as
* work-around for RDMA_READs with ConnectX-2.
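The renamed comp selection above is a simple least-loaded policy over the per-vector completion contexts. Pulled out as a sketch (the helper name is hypothetical; it assumes, as the driver does, that device_list_mutex serializes the active_qps counters):

	static struct isert_comp *
	isert_least_loaded_comp(struct isert_device *device)
	{
		int i, min = 0;

		for (i = 0; i < device->comps_used; i++)
			if (device->comps[i].active_qps <
			    device->comps[min].active_qps)
				min = i;

		device->comps[min].active_qps++; /* caller decrements on error */
		return &device->comps[min];
	}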
@@ -126,29 +146,29 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
- if (protection)
+ if (device->pi_capable)
attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
- pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
- cma_id->device);
- pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
- isert_conn->conn_pd->device);
-
ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
if (ret) {
- pr_err("rdma_create_qp failed for cma_id %d\n", ret);
- return ret;
+ isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+ goto err;
}
isert_conn->conn_qp = cma_id->qp;
- pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
return 0;
+err:
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
+ mutex_unlock(&device_list_mutex);
+
+ return ret;
}
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
- pr_debug("isert_cq_event_callback event: %d\n", e->event);
+ isert_dbg("event: %d\n", e->event);
}
static int
@@ -182,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
}
isert_conn->conn_rx_desc_head = 0;
+
return 0;
dma_map_fail:
@@ -193,6 +214,8 @@ dma_map_fail:
kfree(isert_conn->conn_rx_descs);
isert_conn->conn_rx_descs = NULL;
fail:
+ isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
+
return -ENOMEM;
}
@@ -216,27 +239,23 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->conn_rx_descs = NULL;
}
-static void isert_cq_tx_work(struct work_struct *);
-static void isert_cq_tx_callback(struct ib_cq *, void *);
-static void isert_cq_rx_work(struct work_struct *);
-static void isert_cq_rx_callback(struct ib_cq *, void *);
+static void isert_cq_work(struct work_struct *);
+static void isert_cq_callback(struct ib_cq *, void *);
static int
isert_create_device_ib_res(struct isert_device *device)
{
struct ib_device *ib_dev = device->ib_device;
- struct isert_cq_desc *cq_desc;
struct ib_device_attr *dev_attr;
- int ret = 0, i, j;
- int max_rx_cqe, max_tx_cqe;
+ int ret = 0, i;
+ int max_cqe;
dev_attr = &device->dev_attr;
ret = isert_query_device(ib_dev, dev_attr);
if (ret)
return ret;
- max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
- max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+ max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
/* assign function handlers */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -254,55 +273,38 @@ isert_create_device_ib_res(struct isert_device *device)
device->pi_capable = dev_attr->device_cap_flags &
IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
- device->cqs_used = min_t(int, num_online_cpus(),
- device->ib_device->num_comp_vectors);
- device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
- pr_debug("Using %d CQs, device %s supports %d vectors support "
- "Fast registration %d pi_capable %d\n",
- device->cqs_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_fastreg,
- device->pi_capable);
- device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
- device->cqs_used, GFP_KERNEL);
- if (!device->cq_desc) {
- pr_err("Unable to allocate device->cq_desc\n");
+ device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
+ device->ib_device->num_comp_vectors));
+ isert_info("Using %d CQs, %s supports %d vectors support "
+ "Fast registration %d pi_capable %d\n",
+ device->comps_used, device->ib_device->name,
+ device->ib_device->num_comp_vectors, device->use_fastreg,
+ device->pi_capable);
+
+ device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
+ GFP_KERNEL);
+ if (!device->comps) {
+ isert_err("Unable to allocate completion contexts\n");
return -ENOMEM;
}
- cq_desc = device->cq_desc;
-
- for (i = 0; i < device->cqs_used; i++) {
- cq_desc[i].device = device;
- cq_desc[i].cq_index = i;
-
- INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
- device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
- isert_cq_rx_callback,
- isert_cq_event_callback,
- (void *)&cq_desc[i],
- max_rx_cqe, i);
- if (IS_ERR(device->dev_rx_cq[i])) {
- ret = PTR_ERR(device->dev_rx_cq[i]);
- device->dev_rx_cq[i] = NULL;
- goto out_cq;
- }
- INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
- device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
- isert_cq_tx_callback,
- isert_cq_event_callback,
- (void *)&cq_desc[i],
- max_tx_cqe, i);
- if (IS_ERR(device->dev_tx_cq[i])) {
- ret = PTR_ERR(device->dev_tx_cq[i]);
- device->dev_tx_cq[i] = NULL;
- goto out_cq;
- }
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
- if (ret)
+ comp->device = device;
+ INIT_WORK(&comp->work, isert_cq_work);
+ comp->cq = ib_create_cq(device->ib_device,
+ isert_cq_callback,
+ isert_cq_event_callback,
+ (void *)comp,
+ max_cqe, i);
+ if (IS_ERR(comp->cq)) {
+ ret = PTR_ERR(comp->cq);
+ comp->cq = NULL;
goto out_cq;
+ }
- ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+ ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
if (ret)
goto out_cq;
}
@@ -310,19 +312,15 @@ isert_create_device_ib_res(struct isert_device *device)
return 0;
out_cq:
- for (j = 0; j < i; j++) {
- cq_desc = &device->cq_desc[j];
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- if (device->dev_rx_cq[j]) {
- cancel_work_sync(&cq_desc->cq_rx_work);
- ib_destroy_cq(device->dev_rx_cq[j]);
- }
- if (device->dev_tx_cq[j]) {
- cancel_work_sync(&cq_desc->cq_tx_work);
- ib_destroy_cq(device->dev_tx_cq[j]);
+ if (comp->cq) {
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
}
}
- kfree(device->cq_desc);
+ kfree(device->comps);
return ret;
}
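
With a single CQ per completion vector replacing the old RX/TX pair, the error path no longer needs the old j < i bookkeeping: a failed slot is left NULL, so cleanup can scan the whole comps array (cancelling each context's work before destroying its CQ). The idiom in generic form, where create()/destroy() are placeholders, not driver functions:

/* Hedged sketch of the allocate-all-or-unwind idiom used above. */
for (i = 0; i < n; i++) {
	res[i] = create(i);
	if (IS_ERR(res[i])) {
		ret = PTR_ERR(res[i]);
		res[i] = NULL;		/* mark the hole for cleanup */
		goto out;
	}
}
return 0;
out:
for (i = 0; i < n; i++)
	if (res[i])
		destroy(res[i]);
return ret;
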
@@ -330,21 +328,18 @@ out_cq:
static void
isert_free_device_ib_res(struct isert_device *device)
{
- struct isert_cq_desc *cq_desc;
int i;
- for (i = 0; i < device->cqs_used; i++) {
- cq_desc = &device->cq_desc[i];
+ isert_info("device %p\n", device);
- cancel_work_sync(&cq_desc->cq_rx_work);
- cancel_work_sync(&cq_desc->cq_tx_work);
- ib_destroy_cq(device->dev_rx_cq[i]);
- ib_destroy_cq(device->dev_tx_cq[i]);
- device->dev_rx_cq[i] = NULL;
- device->dev_tx_cq[i] = NULL;
- }
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- kfree(device->cq_desc);
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
+ comp->cq = NULL;
+ }
+ kfree(device->comps);
}
static void
@@ -352,6 +347,7 @@ isert_device_try_release(struct isert_device *device)
{
mutex_lock(&device_list_mutex);
device->refcount--;
+ isert_info("device %p refcount %d\n", device, device->refcount);
if (!device->refcount) {
isert_free_device_ib_res(device);
list_del(&device->dev_node);
@@ -370,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
list_for_each_entry(device, &device_list, dev_node) {
if (device->ib_device->node_guid == cma_id->device->node_guid) {
device->refcount++;
+ isert_info("Found iser device %p refcount %d\n",
+ device, device->refcount);
mutex_unlock(&device_list_mutex);
return device;
}
@@ -393,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
device->refcount++;
list_add_tail(&device->dev_node, &device_list);
+ isert_info("Created a new iser device %p refcount %d\n",
+ device, device->refcount);
mutex_unlock(&device_list_mutex);
return device;
@@ -407,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
if (list_empty(&isert_conn->conn_fr_pool))
return;
- pr_debug("Freeing conn %p fastreg pool", isert_conn);
+ isert_info("Freeing conn %p fastreg pool", isert_conn);
list_for_each_entry_safe(fr_desc, tmp,
&isert_conn->conn_fr_pool, list) {
@@ -425,87 +425,97 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
}
if (i < isert_conn->conn_fr_pool_size)
- pr_warn("Pool still has %d regions registered\n",
+ isert_warn("Pool still has %d regions registered\n",
isert_conn->conn_fr_pool_size - i);
}
static int
+isert_create_pi_ctx(struct fast_reg_descriptor *desc,
+ struct ib_device *device,
+ struct ib_pd *pd)
+{
+ struct ib_mr_init_attr mr_init_attr;
+ struct pi_context *pi_ctx;
+ int ret;
+
+ pi_ctx = kzalloc(sizeof(*pi_ctx), GFP_KERNEL);
+ if (!pi_ctx) {
+ isert_err("Failed to allocate pi context\n");
+ return -ENOMEM;
+ }
+
+ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_frpl)) {
+ isert_err("Failed to allocate prot frpl err=%ld\n",
+ PTR_ERR(pi_ctx->prot_frpl));
+ ret = PTR_ERR(pi_ctx->prot_frpl);
+ goto err_pi_ctx;
+ }
+
+ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_mr)) {
+ isert_err("Failed to allocate prot frmr err=%ld\n",
+ PTR_ERR(pi_ctx->prot_mr));
+ ret = PTR_ERR(pi_ctx->prot_mr);
+ goto err_prot_frpl;
+ }
+ desc->ind |= ISERT_PROT_KEY_VALID;
+
+ memset(&mr_init_attr, 0, sizeof(mr_init_attr));
+ mr_init_attr.max_reg_descriptors = 2;
+ mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ if (IS_ERR(pi_ctx->sig_mr)) {
+ isert_err("Failed to allocate signature enabled mr err=%ld\n",
+ PTR_ERR(pi_ctx->sig_mr));
+ ret = PTR_ERR(pi_ctx->sig_mr);
+ goto err_prot_mr;
+ }
+
+ desc->pi_ctx = pi_ctx;
+ desc->ind |= ISERT_SIG_KEY_VALID;
+ desc->ind &= ~ISERT_PROTECTED;
+
+ return 0;
+
+err_prot_mr:
+ ib_dereg_mr(pi_ctx->prot_mr);
+err_prot_frpl:
+ ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
+err_pi_ctx:
+ kfree(pi_ctx);
+
+ return ret;
+}
+
+static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
- struct fast_reg_descriptor *fr_desc, u8 protection)
+ struct fast_reg_descriptor *fr_desc)
{
int ret;
fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(fr_desc->data_frpl)) {
- pr_err("Failed to allocate data frpl err=%ld\n",
- PTR_ERR(fr_desc->data_frpl));
+ isert_err("Failed to allocate data frpl err=%ld\n",
+ PTR_ERR(fr_desc->data_frpl));
return PTR_ERR(fr_desc->data_frpl);
}
fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(fr_desc->data_mr)) {
- pr_err("Failed to allocate data frmr err=%ld\n",
- PTR_ERR(fr_desc->data_mr));
+ isert_err("Failed to allocate data frmr err=%ld\n",
+ PTR_ERR(fr_desc->data_mr));
ret = PTR_ERR(fr_desc->data_mr);
goto err_data_frpl;
}
- pr_debug("Create fr_desc %p page_list %p\n",
- fr_desc, fr_desc->data_frpl->page_list);
fr_desc->ind |= ISERT_DATA_KEY_VALID;
- if (protection) {
- struct ib_mr_init_attr mr_init_attr = {0};
- struct pi_context *pi_ctx;
-
- fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
- if (!fr_desc->pi_ctx) {
- pr_err("Failed to allocate pi context\n");
- ret = -ENOMEM;
- goto err_data_mr;
- }
- pi_ctx = fr_desc->pi_ctx;
-
- pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_frpl)) {
- pr_err("Failed to allocate prot frpl err=%ld\n",
- PTR_ERR(pi_ctx->prot_frpl));
- ret = PTR_ERR(pi_ctx->prot_frpl);
- goto err_pi_ctx;
- }
-
- pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_mr)) {
- pr_err("Failed to allocate prot frmr err=%ld\n",
- PTR_ERR(pi_ctx->prot_mr));
- ret = PTR_ERR(pi_ctx->prot_mr);
- goto err_prot_frpl;
- }
- fr_desc->ind |= ISERT_PROT_KEY_VALID;
-
- mr_init_attr.max_reg_descriptors = 2;
- mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
- pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
- if (IS_ERR(pi_ctx->sig_mr)) {
- pr_err("Failed to allocate signature enabled mr err=%ld\n",
- PTR_ERR(pi_ctx->sig_mr));
- ret = PTR_ERR(pi_ctx->sig_mr);
- goto err_prot_mr;
- }
- fr_desc->ind |= ISERT_SIG_KEY_VALID;
- }
- fr_desc->ind &= ~ISERT_PROTECTED;
+ isert_dbg("Created fr_desc %p\n", fr_desc);
return 0;
-err_prot_mr:
- ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
-err_prot_frpl:
- ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
-err_pi_ctx:
- kfree(fr_desc->pi_ctx);
-err_data_mr:
- ib_dereg_mr(fr_desc->data_mr);
+
err_data_frpl:
ib_free_fast_reg_page_list(fr_desc->data_frpl);
@@ -513,7 +523,7 @@ err_data_frpl:
}
static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc;
struct isert_device *device = isert_conn->conn_device;
@@ -531,16 +541,15 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
for (i = 0; i < tag_num; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
- pr_err("Failed to allocate fast_reg descriptor\n");
+ isert_err("Failed to allocate fast_reg descriptor\n");
ret = -ENOMEM;
goto err;
}
ret = isert_create_fr_desc(device->ib_device,
- isert_conn->conn_pd, fr_desc,
- pi_support);
+ isert_conn->conn_pd, fr_desc);
if (ret) {
- pr_err("Failed to create fastreg descriptor err=%d\n",
+ isert_err("Failed to create fastreg descriptor err=%d\n",
ret);
kfree(fr_desc);
goto err;
@@ -550,7 +559,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
isert_conn->conn_fr_pool_size++;
}
- pr_debug("Creating conn %p fastreg pool size=%d",
+ isert_dbg("Creating conn %p fastreg pool size=%d",
isert_conn, isert_conn->conn_fr_pool_size);
return 0;
@@ -563,47 +572,45 @@ err:
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
- struct iscsi_np *np = cma_id->context;
- struct isert_np *isert_np = np->np_context;
+ struct isert_np *isert_np = cma_id->context;
+ struct iscsi_np *np = isert_np->np;
struct isert_conn *isert_conn;
struct isert_device *device;
struct ib_device *ib_dev = cma_id->device;
int ret = 0;
- u8 pi_support;
spin_lock_bh(&np->np_thread_lock);
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("iscsi_np is not enabled, reject connect request\n");
+ isert_dbg("iscsi_np is not enabled, reject connect request\n");
return rdma_reject(cma_id, NULL, 0);
}
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
+ isert_dbg("cma_id: %p, portal: %p\n",
cma_id, cma_id->context);
isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
if (!isert_conn) {
- pr_err("Unable to allocate isert_conn\n");
+ isert_err("Unable to allocate isert_conn\n");
return -ENOMEM;
}
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
init_completion(&isert_conn->conn_login_comp);
+ init_completion(&isert_conn->login_req_comp);
init_completion(&isert_conn->conn_wait);
- init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
spin_lock_init(&isert_conn->conn_lock);
INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
- cma_id->context = isert_conn;
isert_conn->conn_cm_id = cma_id;
isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!isert_conn->login_buf) {
- pr_err("Unable to allocate isert_conn->login_buf\n");
+ isert_err("Unable to allocate isert_conn->login_buf\n");
ret = -ENOMEM;
goto out;
}
@@ -611,7 +618,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->login_req_buf = isert_conn->login_buf;
isert_conn->login_rsp_buf = isert_conn->login_buf +
ISCSI_DEF_MAX_RECV_SEG_LEN;
- pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+ isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
isert_conn->login_buf, isert_conn->login_req_buf,
isert_conn->login_rsp_buf);
@@ -621,7 +628,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
if (ret) {
- pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+ isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
ret);
isert_conn->login_req_dma = 0;
goto out_login_buf;
@@ -633,7 +640,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
- pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+ isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
ret);
isert_conn->login_rsp_dma = 0;
goto out_req_dma_map;
@@ -649,13 +656,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->initiator_depth = min_t(u8,
event->param.conn.initiator_depth,
device->dev_attr.max_qp_init_rd_atom);
- pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+ isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
isert_conn->conn_device = device;
isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
if (IS_ERR(isert_conn->conn_pd)) {
ret = PTR_ERR(isert_conn->conn_pd);
- pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+ isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
isert_conn, ret);
goto out_pd;
}
@@ -664,20 +671,20 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(isert_conn->conn_mr)) {
ret = PTR_ERR(isert_conn->conn_mr);
- pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+ isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
isert_conn, ret);
goto out_mr;
}
- pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
- if (pi_support && !device->pi_capable) {
- pr_err("Protection information requested but not supported, "
- "rejecting connect request\n");
- ret = rdma_reject(cma_id, NULL, 0);
- goto out_mr;
- }
+ ret = isert_conn_setup_qp(isert_conn, cma_id);
+ if (ret)
+ goto out_conn_dev;
- ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
+ ret = isert_rdma_post_recvl(isert_conn);
+ if (ret)
+ goto out_conn_dev;
+
+ ret = isert_rdma_accept(isert_conn);
if (ret)
goto out_conn_dev;
@@ -685,7 +692,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);
- pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+ isert_info("np %p: Allow accept_np to continue\n", np);
up(&isert_np->np_sem);
return 0;
@@ -705,6 +712,7 @@ out_login_buf:
kfree(isert_conn->login_buf);
out:
kfree(isert_conn);
+ rdma_reject(cma_id, NULL, 0);
return ret;
}
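
Two ordering points are worth noting here: all failure paths now converge on a single rdma_reject(), so the initiator gets an explicit reject instead of a CM timeout (the removed PI check carried its own reject), and the login receive is posted before rdma_accept() so the initiator's first PDU cannot race an empty receive queue. A hedged sketch of that second invariant:

/* Hedged sketch: post the login receive *before* accepting, so the
 * first login PDU always finds a buffer waiting. */
ret = isert_rdma_post_recvl(isert_conn);
if (!ret)
	ret = isert_rdma_accept(isert_conn);
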
@@ -713,24 +721,25 @@ isert_connect_release(struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_device *device = isert_conn->conn_device;
- int cq_index;
- pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p\n", isert_conn);
if (device && device->use_fastreg)
isert_conn_free_fastreg_pool(isert_conn);
+ isert_free_rx_descriptors(isert_conn);
+ rdma_destroy_id(isert_conn->conn_cm_id);
+
if (isert_conn->conn_qp) {
- cq_index = ((struct isert_cq_desc *)
- isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
- pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
- isert_conn->conn_device->cq_active_qps[cq_index]--;
+ struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
- rdma_destroy_qp(isert_conn->conn_cm_id);
- }
+ isert_dbg("dec completion context %p active_qps\n", comp);
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
+ mutex_unlock(&device_list_mutex);
- isert_free_rx_descriptors(isert_conn);
- rdma_destroy_id(isert_conn->conn_cm_id);
+ ib_destroy_qp(isert_conn->conn_qp);
+ }
ib_dereg_mr(isert_conn->conn_mr);
ib_dealloc_pd(isert_conn->conn_pd);
@@ -747,16 +756,24 @@ isert_connect_release(struct isert_conn *isert_conn)
if (device)
isert_device_try_release(device);
-
- pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
- struct isert_conn *isert_conn = cma_id->context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
- kref_get(&isert_conn->conn_kref);
+ isert_info("conn %p\n", isert_conn);
+
+ if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+ isert_warn("conn %p connect_release is running\n", isert_conn);
+ return;
+ }
+
+ mutex_lock(&isert_conn->conn_mutex);
+ if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+ isert_conn->state = ISER_CONN_UP;
+ mutex_unlock(&isert_conn->conn_mutex);
}
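
kref_get_unless_zero() closes a race the old unconditional kref_get() left open: if the final put has already run, the established event must not resurrect a connection whose release is in flight. The idiom in generic form (obj and obj_release are illustrative names):

/* Hedged sketch: pin the object only if it is still alive. */
if (!kref_get_unless_zero(&obj->kref))
	return;		/* release already in progress, back off */
/* ... safe to use obj ... */
kref_put(&obj->kref, obj_release);
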
static void
@@ -765,8 +782,8 @@ isert_release_conn_kref(struct kref *kref)
struct isert_conn *isert_conn = container_of(kref,
struct isert_conn, conn_kref);
- pr_debug("Calling isert_connect_release for final kref %s/%d\n",
- current->comm, current->pid);
+ isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
+ current->pid);
isert_connect_release(isert_conn);
}
@@ -777,75 +794,111 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TERMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
{
- struct isert_conn *isert_conn = container_of(work,
- struct isert_conn, conn_logout_work);
+ int err;
- pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->state == ISER_CONN_UP)
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
isert_conn->state = ISER_CONN_TERMINATING;
-
- if (isert_conn->post_recv_buf_count == 0 &&
- atomic_read(&isert_conn->post_send_buf_count) == 0) {
- mutex_unlock(&isert_conn->conn_mutex);
- goto wake_up;
- }
- if (!isert_conn->conn_cm_id) {
- mutex_unlock(&isert_conn->conn_mutex);
- isert_put_conn(isert_conn);
- return;
+ err = rdma_disconnect(isert_conn->conn_cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
+ break;
+ default:
+ isert_warn("conn %p teminating in state %d\n",
+ isert_conn, isert_conn->state);
}
+}
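
Because the TERMINATING case is a plain break, the routine is idempotent and every teardown path can call it without coordinating who disconnects first; the only contract is holding conn_mutex, as the disconnect handler below demonstrates:

mutex_lock(&isert_conn->conn_mutex);
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->conn_mutex);
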
- if (isert_conn->disconnect) {
- /* Send DREQ/DREP towards our initiator */
- rdma_disconnect(isert_conn->conn_cm_id);
- }
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+ enum rdma_cm_event_type event)
+{
+ isert_dbg("isert np %p, handling event %d\n", isert_np, event);
- mutex_unlock(&isert_conn->conn_mutex);
+ switch (event) {
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_np->np_cm_id = NULL;
+ break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ isert_np->np_cm_id = isert_setup_id(isert_np);
+ if (IS_ERR(isert_np->np_cm_id)) {
+ isert_err("isert np %p setup id failed: %ld\n",
+ isert_np, PTR_ERR(isert_np->np_cm_id));
+ isert_np->np_cm_id = NULL;
+ }
+ break;
+ default:
+ isert_err("isert np %p Unexpected event %d\n",
+ isert_np, event);
+ }
-wake_up:
- complete(&isert_conn->conn_wait);
+ return -1;
}
static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ enum rdma_cm_event_type event)
{
+ struct isert_np *isert_np = cma_id->context;
struct isert_conn *isert_conn;
- if (!cma_id->qp) {
- struct isert_np *isert_np = cma_id->context;
+ if (isert_np->np_cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event);
- isert_np->np_cm_id = NULL;
- return -1;
- }
+ isert_conn = cma_id->qp->qp_context;
- isert_conn = (struct isert_conn *)cma_id->context;
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn_terminate(isert_conn);
+ mutex_unlock(&isert_conn->conn_mutex);
- isert_conn->disconnect = disconnect;
- INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
- schedule_work(&isert_conn->conn_logout_work);
+ isert_info("conn %p completing conn_wait\n", isert_conn);
+ complete(&isert_conn->conn_wait);
return 0;
}
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+ isert_put_conn(isert_conn);
+}
+
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
int ret = 0;
- bool disconnect = false;
- pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
- event->event, event->status, cma_id->context, cma_id);
+ isert_info("event %d status %d id %p np %p\n", event->event,
+ event->status, cma_id, cma_id->context);
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
if (ret)
- pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
- event->event, ret);
+ isert_err("failed handle connect request %d\n", ret);
break;
case RDMA_CM_EVENT_ESTABLISHED:
isert_connected_handler(cma_id);
@@ -853,13 +906,16 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
- disconnect = true;
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
- ret = isert_disconnected_handler(cma_id, disconnect);
+ ret = isert_disconnected_handler(cma_id, event->event);
break;
+ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
+ isert_connect_error(cma_id);
+ break;
default:
- pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+ isert_err("Unhandled RDMA CMA event: %d\n", event->event);
break;
}
@@ -876,7 +932,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &isert_conn->conn_rx_descs[rx_head];
- rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
@@ -890,10 +946,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
&rx_wr_failed);
if (ret) {
- pr_err("ib_post_recv() failed with ret: %d\n", ret);
+ isert_err("ib_post_recv() failed with ret: %d\n", ret);
isert_conn->post_recv_buf_count -= count;
} else {
- pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
+ isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
isert_conn->conn_rx_desc_head = rx_head;
}
return ret;
@@ -910,19 +966,15 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
ISER_HEADERS_LEN, DMA_TO_DEVICE);
send_wr.next = NULL;
- send_wr.wr_id = (unsigned long)tx_desc;
+ send_wr.wr_id = (uintptr_t)tx_desc;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- atomic_inc(&isert_conn->post_send_buf_count);
-
ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
- if (ret) {
- pr_err("ib_post_send() failed, ret: %d\n", ret);
- atomic_dec(&isert_conn->post_send_buf_count);
- }
+ if (ret)
+ isert_err("ib_post_send() failed, ret: %d\n", ret);
return ret;
}
@@ -945,7 +997,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,
if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
- pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+ isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
}
}
@@ -959,7 +1011,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr)) {
- pr_err("ib_dma_mapping_error() failed\n");
+ isert_err("ib_dma_mapping_error() failed\n");
return -ENOMEM;
}
@@ -968,40 +1020,24 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
- pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
- " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
- tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
+ isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
+ tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
+ tx_desc->tx_sg[0].lkey);
return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct ib_send_wr *send_wr, bool coalesce)
+ struct ib_send_wr *send_wr)
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
send_wr->opcode = IB_WR_SEND;
send_wr->sg_list = &tx_desc->tx_sg[0];
send_wr->num_sge = isert_cmd->tx_desc.num_sge;
- /*
- * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
- * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
- */
- mutex_lock(&isert_conn->conn_mutex);
- if (coalesce && isert_conn->state == ISER_CONN_UP &&
- ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
- tx_desc->llnode_active = true;
- llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_mutex);
- return;
- }
- isert_conn->conn_comp_batch = 0;
- tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_mutex);
-
send_wr->send_flags = IB_SEND_SIGNALED;
}
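
With the coalescing batch gone, every send WR is signaled, so there is exactly one completion per post. That one-to-one invariant is what lets the rest of this patch delete the post_send_buf_count accounting: on a QP error the flush produces a completion for each outstanding WR, and the error path can simply count flushes. Reduced to its essentials (qp and bad_wr are placeholders):

/* Hedged sketch: one signaled completion per posted send WR. */
send_wr->send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(qp, send_wr, &bad_wr);
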
@@ -1017,22 +1053,21 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = isert_conn->conn_mr->lkey;
- pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
+ isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
- rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
+ rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
if (ret) {
- pr_err("ib_post_recv() failed: %d\n", ret);
+ isert_err("ib_post_recv() failed: %d\n", ret);
isert_conn->post_recv_buf_count--;
}
- pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
return ret;
}
@@ -1072,13 +1107,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
if (login->login_complete) {
if (!conn->sess->sess_ops->SessionType &&
isert_conn->conn_device->use_fastreg) {
- /* Normal Session and fastreg is used */
- u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
-
- ret = isert_conn_create_fastreg_pool(isert_conn,
- pi_support);
+ ret = isert_conn_create_fastreg_pool(isert_conn);
if (ret) {
- pr_err("Conn: %p failed to create"
+ isert_err("Conn: %p failed to create"
" fastreg pool\n", isert_conn);
return ret;
}
@@ -1092,7 +1123,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
if (ret)
return ret;
- isert_conn->state = ISER_CONN_UP;
+ /* Now we are in FULL_FEATURE phase */
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_FULL_FEATURE;
+ mutex_unlock(&isert_conn->conn_mutex);
goto post_send;
}
@@ -1109,18 +1143,17 @@ post_send:
}
static void
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
- struct isert_conn *isert_conn)
+isert_rx_login_req(struct isert_conn *isert_conn)
{
+ struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+ int rx_buflen = isert_conn->login_req_len;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
int size;
- if (!login) {
- pr_err("conn->conn_login is NULL\n");
- dump_stack();
- return;
- }
+ isert_info("conn %p\n", isert_conn);
+
+ WARN_ON_ONCE(!login);
if (login->first_request) {
struct iscsi_login_req *login_req =
@@ -1146,8 +1179,9 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
- pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
- size, rx_buflen, MAX_KEY_VALUE_PAIRS);
+ isert_dbg("Using login payload size: %d, rx_buflen: %d "
+ "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
+ MAX_KEY_VALUE_PAIRS);
memcpy(login->req_buf, &rx_desc->data[0], size);
if (login->first_request) {
@@ -1166,7 +1200,7 @@ static struct iscsi_cmd
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd) {
- pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
+ isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
return NULL;
}
isert_cmd = iscsit_priv_cmd(cmd);
@@ -1209,8 +1243,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
sg = &cmd->se_cmd.t_data_sg[0];
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
- pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
- sg, sg_nents, &rx_desc->data[0], imm_data_len);
+ isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
+ sg, sg_nents, &rx_desc->data[0], imm_data_len);
sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
@@ -1254,13 +1288,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
* FIXME: Unexpected unsolicited_data out
*/
if (!cmd->unsolicited_data) {
- pr_err("Received unexpected solicited data payload\n");
+ isert_err("Received unexpected solicited data payload\n");
dump_stack();
return -1;
}
- pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
- unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);
+ isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
+ "write_data_done: %u, data_length: %u\n",
+ unsol_data_len, cmd->write_data_done,
+ cmd->se_cmd.data_length);
sg_off = cmd->write_data_done / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
@@ -1270,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
* FIXME: Non page-aligned unsolicited_data out
*/
if (page_off) {
- pr_err("Received unexpected non-page aligned data payload\n");
+ isert_err("unexpected non-page aligned data payload\n");
dump_stack();
return -1;
}
- pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
- sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);
+ isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
+ "sg_nents: %u from %p %u\n", sg_start, sg_off,
+ sg_nents, &rx_desc->data[0], unsol_data_len);
sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
unsol_data_len);
@@ -1322,8 +1359,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
text_in = kzalloc(payload_length, GFP_KERNEL);
if (!text_in) {
- pr_err("Unable to allocate text_in of payload_length: %u\n",
- payload_length);
+ isert_err("Unable to allocate text_in of payload_length: %u\n",
+ payload_length);
return -ENOMEM;
}
cmd->text_in_ptr = text_in;
@@ -1348,8 +1385,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
if (sess->sess_ops->SessionType &&
(!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
- pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
- " ignoring\n", opcode);
+ isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
+ " ignoring\n", opcode);
return 0;
}
@@ -1395,10 +1432,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
break;
ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
- if (ret > 0)
- wait_for_completion_timeout(&conn->conn_logout_comp,
- SECONDS_FOR_LOGOUT_COMP *
- HZ);
break;
case ISCSI_OP_TEXT:
cmd = isert_allocate_cmd(conn);
@@ -1410,7 +1443,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
rx_desc, (struct iscsi_text *)hdr);
break;
default:
- pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+ isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
dump_stack();
break;
}
@@ -1431,23 +1464,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
if (iser_hdr->flags & ISER_RSV) {
read_stag = be32_to_cpu(iser_hdr->read_stag);
read_va = be64_to_cpu(iser_hdr->read_va);
- pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
- read_stag, (unsigned long long)read_va);
+ isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
+ read_stag, (unsigned long long)read_va);
}
if (iser_hdr->flags & ISER_WSV) {
write_stag = be32_to_cpu(iser_hdr->write_stag);
write_va = be64_to_cpu(iser_hdr->write_va);
- pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
- write_stag, (unsigned long long)write_va);
+ isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
+ write_stag, (unsigned long long)write_va);
}
- pr_debug("ISER ISCSI_CTRL PDU\n");
+ isert_dbg("ISER ISCSI_CTRL PDU\n");
break;
case ISER_HELLO:
- pr_err("iSER Hello message\n");
+ isert_err("iSER Hello message\n");
break;
default:
- pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
+ isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
break;
}
@@ -1457,7 +1490,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
- unsigned long xfer_len)
+ u32 xfer_len)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct iscsi_hdr *hdr;
@@ -1467,34 +1500,43 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
if ((char *)desc == isert_conn->login_req_buf) {
rx_dma = isert_conn->login_req_dma;
rx_buflen = ISER_RX_LOGIN_SIZE;
- pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+ isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
rx_dma, rx_buflen);
} else {
rx_dma = desc->dma_addr;
rx_buflen = ISER_RX_PAYLOAD_SIZE;
- pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+ isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
rx_dma, rx_buflen);
}
ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
hdr = &desc->iscsi_header;
- pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+ isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
hdr->opcode, hdr->itt, hdr->flags,
(int)(xfer_len - ISER_HEADERS_LEN));
- if ((char *)desc == isert_conn->login_req_buf)
- isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
- isert_conn);
- else
+ if ((char *)desc == isert_conn->login_req_buf) {
+ isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+ if (isert_conn->conn) {
+ struct iscsi_login *login = isert_conn->conn->conn_login;
+
+ if (login && !login->first_request)
+ isert_rx_login_req(isert_conn);
+ }
+ mutex_lock(&isert_conn->conn_mutex);
+ complete(&isert_conn->login_req_comp);
+ mutex_unlock(&isert_conn->conn_mutex);
+ } else {
isert_rx_do_work(desc, isert_conn);
+ }
ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
DMA_FROM_DEVICE);
isert_conn->post_recv_buf_count--;
- pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
- isert_conn->post_recv_buf_count);
+ isert_dbg("Decremented post_recv_buf_count: %d\n",
+ isert_conn->post_recv_buf_count);
if ((char *)desc == isert_conn->login_req_buf)
return;
@@ -1505,7 +1547,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
ISERT_MIN_POSTED_RX);
err = isert_post_recv(isert_conn, count);
if (err) {
- pr_err("isert_post_recv() count: %d failed, %d\n",
+ isert_err("isert_post_recv() count: %d failed, %d\n",
count, err);
}
}
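
Login receives are now decoupled from their processing: the RX completion records the payload length and signals login_req_comp, while the login side, which this hunk does not show, presumably waits on that completion before parsing. A sketch of the handshake with struct completion:

/* Hedged sketch of the producer/consumer handshake. */
/* RX completion (producer): */
isert_conn->login_req_len = len;
complete(&isert_conn->login_req_comp);

/* login path (consumer), elsewhere in the driver: */
wait_for_completion(&isert_conn->login_req_comp);
isert_rx_login_req(isert_conn);
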
@@ -1534,12 +1576,12 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
data->dma_dir);
if (unlikely(!data->dma_nents)) {
- pr_err("Cmd: unable to dma map SGs %p\n", sg);
+ isert_err("Cmd: unable to dma map SGs %p\n", sg);
return -EINVAL;
}
- pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
- isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+ isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+ isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
return 0;
}
@@ -1560,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
if (wr->data.sg) {
- pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
+ isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
isert_unmap_data_buf(isert_conn, &wr->data);
}
if (wr->send_wr) {
- pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
+ isert_dbg("Cmd %p free send_wr\n", isert_cmd);
kfree(wr->send_wr);
wr->send_wr = NULL;
}
if (wr->ib_sge) {
- pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
+ isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
kfree(wr->ib_sge);
wr->ib_sge = NULL;
}
@@ -1586,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
LIST_HEAD(unmap_list);
- pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
if (wr->fr_desc) {
- pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
- isert_cmd, wr->fr_desc);
+ isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
if (wr->fr_desc->ind & ISERT_PROTECTED) {
isert_unmap_data_buf(isert_conn, &wr->prot);
wr->fr_desc->ind &= ~ISERT_PROTECTED;
@@ -1602,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
}
if (wr->data.sg) {
- pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
+ isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
isert_unmap_data_buf(isert_conn, &wr->data);
}
@@ -1618,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
struct iscsi_conn *conn = isert_conn->conn;
struct isert_device *device = isert_conn->conn_device;
- pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
@@ -1668,7 +1709,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
- pr_debug("Calling transport_generic_free_cmd from"
+ isert_dbg("Calling transport_generic_free_cmd from"
" isert_put_cmd for 0x%02x\n",
cmd->iscsi_opcode);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1687,7 +1728,7 @@ static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
if (tx_desc->dma_addr != 0) {
- pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
+ isert_dbg("unmap single for tx_desc->dma_addr\n");
ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
tx_desc->dma_addr = 0;
@@ -1699,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
struct ib_device *ib_dev, bool comp_err)
{
if (isert_cmd->pdu_buf_dma != 0) {
- pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
+ isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
isert_cmd->pdu_buf_dma = 0;
@@ -1717,7 +1758,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
- pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ isert_err("ib_check_mr_status failed, ret %d\n", ret);
goto fail_mr_status;
}
@@ -1740,12 +1781,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
do_div(sec_offset_err, block_size);
se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
- pr_err("isert: PI error found type %d at sector 0x%llx "
- "expected 0x%x vs actual 0x%x\n",
- mr_status.sig_err.err_type,
- (unsigned long long)se_cmd->bad_sector,
- mr_status.sig_err.expected,
- mr_status.sig_err.actual);
+ isert_err("PI error found type %d at sector 0x%llx "
+ "expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ (unsigned long long)se_cmd->bad_sector,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
ret = 1;
}
@@ -1801,7 +1842,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->write_data_done = wr->data.len;
wr->send_wr_num = 0;
- pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
+ isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1823,36 +1864,22 @@ isert_do_control_comp(struct work_struct *work)
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
+
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
- pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
-
- atomic_dec(&isert_conn->post_send_buf_count);
iscsit_tmr_post_handler(cmd, cmd->conn);
-
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
- break;
- case ISTATE_SEND_REJECT:
- pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
- atomic_dec(&isert_conn->post_send_buf_count);
-
+ case ISTATE_SEND_REJECT: /* FALLTHRU */
+ case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
+ ib_dev, false);
break;
case ISTATE_SEND_LOGOUTRSP:
- pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
-
- atomic_dec(&isert_conn->post_send_buf_count);
iscsit_logout_post_handler(cmd, cmd->conn);
break;
- case ISTATE_SEND_TEXTRSP:
- atomic_dec(&isert_conn->post_send_buf_count);
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
- break;
default:
- pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+ isert_err("Unknown i_state %d\n", cmd->i_state);
dump_stack();
break;
}
@@ -1865,7 +1892,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct ib_device *ib_dev)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1878,267 +1904,151 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
return;
}
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- else
- atomic_dec(&isert_conn->post_send_buf_count);
-
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
static void
-__isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+isert_send_completion(struct iser_tx_desc *tx_desc,
+ struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
struct isert_rdma_wr *wr;
if (!isert_cmd) {
- atomic_dec(&isert_conn->post_send_buf_count);
isert_unmap_tx_desc(tx_desc, ib_dev);
return;
}
wr = &isert_cmd->rdma_wr;
+ isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+
switch (wr->iser_ib_op) {
case ISER_IB_RECV:
- pr_err("isert_send_completion: Got ISER_IB_RECV\n");
+ isert_err("Got ISER_IB_RECV\n");
dump_stack();
break;
case ISER_IB_SEND:
- pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
isert_response_completion(tx_desc, isert_cmd,
isert_conn, ib_dev);
break;
case ISER_IB_RDMA_WRITE:
- pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_write(tx_desc, isert_cmd);
break;
case ISER_IB_RDMA_READ:
- pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
-
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_read(tx_desc, isert_cmd);
break;
default:
- pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
+ isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
dump_stack();
break;
}
}
-static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
-{
- struct llist_node *llnode = tx_desc->comp_llnode_batch;
- struct iser_tx_desc *t;
- /*
- * Drain coalesced completion llist starting from comp_llnode_batch
- * setup in isert_init_send_wr(), and then complete trailing tx_desc.
- */
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- __isert_send_completion(t, isert_conn);
- }
- __isert_send_completion(tx_desc, isert_conn);
-}
-
-static void
-isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+/**
+ * is_isert_tx_desc() - Indicate if the completion wr_id
+ * is a TX descriptor or not.
+ * @isert_conn: iser connection
+ * @wr_id: completion WR identifier
+ *
+ * Since we cannot rely on the wc opcode in FLUSH errors,
+ * we must work around it by checking if the wr_id address
+ * falls in the iser connection rx_descs buffer. If so,
+ * it is an RX descriptor; otherwise it is a TX.
+ */
+static inline bool
+is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
- struct llist_node *llnode;
- struct isert_rdma_wr *wr;
- struct iser_tx_desc *t;
+ void *start = isert_conn->conn_rx_descs;
+ int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
- mutex_lock(&isert_conn->conn_mutex);
- llnode = llist_del_all(&isert_conn->conn_comp_llist);
- isert_conn->conn_comp_batch = 0;
- mutex_unlock(&isert_conn->conn_mutex);
-
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- wr = &t->isert_cmd->rdma_wr;
-
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num,
- &isert_conn->post_send_buf_count);
- else
- atomic_dec(&isert_conn->post_send_buf_count);
+ if (wr_id >= start && wr_id < start + len)
+ return false;
- isert_completion_put(t, t->isert_cmd, ib_dev, true);
- }
+ return true;
}
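
The range test works because all RX descriptors live in the single contiguous conn_rx_descs array, so one pointer comparison classifies a wr_id even when a flushed completion carries no usable opcode. The same check in generic form (names illustrative):

/* Hedged sketch: is p inside the array [base, base + nelem * size)? */
static bool in_array(const void *p, const void *base,
		     size_t elem_size, size_t nelem)
{
	const char *c = p, *b = base;

	return c >= b && c < b + elem_size * nelem;
}
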
static void
-isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
- struct llist_node *llnode = tx_desc->comp_llnode_batch;
- struct isert_rdma_wr *wr;
- struct iser_tx_desc *t;
-
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- wr = &t->isert_cmd->rdma_wr;
+ if (wc->wr_id == ISER_BEACON_WRID) {
+ isert_info("conn %p completing conn_wait_comp_err\n",
+ isert_conn);
+ complete(&isert_conn->conn_wait_comp_err);
+ } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_cmd *isert_cmd;
+ struct iser_tx_desc *desc;
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num,
- &isert_conn->post_send_buf_count);
+ desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+ isert_cmd = desc->isert_cmd;
+ if (!isert_cmd)
+ isert_unmap_tx_desc(desc, ib_dev);
else
- atomic_dec(&isert_conn->post_send_buf_count);
-
- isert_completion_put(t, t->isert_cmd, ib_dev, true);
- }
- tx_desc->comp_llnode_batch = NULL;
-
- if (!isert_cmd)
- isert_unmap_tx_desc(tx_desc, ib_dev);
- else
- isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
-}
-
-static void
-isert_cq_rx_comp_err(struct isert_conn *isert_conn)
-{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct iscsi_conn *conn = isert_conn->conn;
-
- if (isert_conn->post_recv_buf_count)
- return;
-
- isert_cq_drain_comp_llist(isert_conn, ib_dev);
-
- if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
+ isert_completion_put(desc, isert_cmd, ib_dev, true);
+ } else {
+ isert_conn->post_recv_buf_count--;
+ if (!isert_conn->post_recv_buf_count)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
-
- while (atomic_read(&isert_conn->post_send_buf_count))
- msleep(3000);
-
- mutex_lock(&isert_conn->conn_mutex);
- isert_conn->state = ISER_CONN_DOWN;
- mutex_unlock(&isert_conn->conn_mutex);
-
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-
- complete(&isert_conn->conn_wait_comp_err);
}
static void
-isert_cq_tx_work(struct work_struct *work)
+isert_handle_wc(struct ib_wc *wc)
{
- struct isert_cq_desc *cq_desc = container_of(work,
- struct isert_cq_desc, cq_tx_work);
- struct isert_device *device = cq_desc->device;
- int cq_index = cq_desc->cq_index;
- struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
struct isert_conn *isert_conn;
struct iser_tx_desc *tx_desc;
- struct ib_wc wc;
-
- while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
- tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
- isert_conn = wc.qp->qp_context;
+ struct iser_rx_desc *rx_desc;
- if (wc.status == IB_WC_SUCCESS) {
- isert_send_completion(tx_desc, isert_conn);
+ isert_conn = wc->qp->qp_context;
+ if (likely(wc->status == IB_WC_SUCCESS)) {
+ if (wc->opcode == IB_WC_RECV) {
+ rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
+ isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
} else {
- pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
- pr_debug("TX wc.status: 0x%08x\n", wc.status);
- pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-
- if (wc.wr_id != ISER_FASTREG_LI_WRID) {
- if (tx_desc->llnode_active)
- continue;
-
- atomic_dec(&isert_conn->post_send_buf_count);
- isert_cq_tx_comp_err(tx_desc, isert_conn);
- }
+ tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+ isert_send_completion(tx_desc, isert_conn);
}
- }
-
- ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_tx_callback(struct ib_cq *cq, void *context)
-{
- struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+ } else {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ isert_err("wr id %llx status %d vend_err %x\n",
+ wc->wr_id, wc->status, wc->vendor_err);
+ else
+ isert_dbg("flush error: wr id %llx\n", wc->wr_id);
- queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
+ if (wc->wr_id != ISER_FASTREG_LI_WRID)
+ isert_cq_comp_err(isert_conn, wc);
+ }
}
static void
-isert_cq_rx_work(struct work_struct *work)
+isert_cq_work(struct work_struct *work)
{
- struct isert_cq_desc *cq_desc = container_of(work,
- struct isert_cq_desc, cq_rx_work);
- struct isert_device *device = cq_desc->device;
- int cq_index = cq_desc->cq_index;
- struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
- struct isert_conn *isert_conn;
- struct iser_rx_desc *rx_desc;
- struct ib_wc wc;
- unsigned long xfer_len;
+ enum { isert_poll_budget = 65536 };
+ struct isert_comp *comp = container_of(work, struct isert_comp,
+ work);
+ struct ib_wc *const wcs = comp->wcs;
+ int i, n, completed = 0;
- while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
- rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
- isert_conn = wc.qp->qp_context;
+ while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
+ for (i = 0; i < n; i++)
+ isert_handle_wc(&wcs[i]);
- if (wc.status == IB_WC_SUCCESS) {
- xfer_len = (unsigned long)wc.byte_len;
- isert_rx_completion(rx_desc, isert_conn, xfer_len);
- } else {
- pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
- if (wc.status != IB_WC_WR_FLUSH_ERR) {
- pr_debug("RX wc.status: 0x%08x\n", wc.status);
- pr_debug("RX wc.vendor_err: 0x%08x\n",
- wc.vendor_err);
- }
- isert_conn->post_recv_buf_count--;
- isert_cq_rx_comp_err(isert_conn);
- }
+ completed += n;
+ if (completed >= isert_poll_budget)
+ break;
}
- ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
+ ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}
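
Polling ARRAY_SIZE(comp->wcs) entries per call amortizes the cost of ib_poll_cq(), and the 65536-entry budget bounds how long one busy CQ can hold the workqueue before it is re-armed. The loop shape, reduced to its essentials (cq, wcs, handle and budget are stand-ins):

/* Hedged sketch of budgeted batch polling followed by re-arm. */
while ((n = ib_poll_cq(cq, ARRAY_SIZE(wcs), wcs)) > 0) {
	for (i = 0; i < n; i++)
		handle(&wcs[i]);
	completed += n;
	if (completed >= budget)
		break;
}
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
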
static void
-isert_cq_rx_callback(struct ib_cq *cq, void *context)
+isert_cq_callback(struct ib_cq *cq, void *context)
{
- struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+ struct isert_comp *comp = context;
- queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
+ queue_work(isert_comp_wq, &comp->work);
}
static int
@@ -2147,13 +2057,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
struct ib_send_wr *wr_failed;
int ret;
- atomic_inc(&isert_conn->post_send_buf_count);
-
ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
&wr_failed);
if (ret) {
- pr_err("ib_post_send failed with %d\n", ret);
- atomic_dec(&isert_conn->post_send_buf_count);
+ isert_err("ib_post_send failed with %d\n", ret);
return ret;
}
return ret;
@@ -2200,9 +2107,9 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("Posting SCSI Response\n");
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2231,8 +2138,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct isert_device *device = isert_conn->conn_device;
- if (device->pi_capable)
- return TARGET_PROT_ALL;
+ if (conn->tpg->tpg_attrib.t10_pi) {
+ if (device->pi_capable) {
+ isert_info("conn %p PI offload enabled\n", isert_conn);
+ isert_conn->pi_support = true;
+ return TARGET_PROT_ALL;
+ }
+ }
+
+ isert_info("conn %p PI offload disabled\n", isert_conn);
+ isert_conn->pi_support = false;
return TARGET_PROT_NORMAL;
}
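
Protection offload is now negotiated per connection rather than assumed from device capability alone: the portal group must ask for T10-PI and the HCA must support signature handover, and the verdict is cached in pi_support for later QP and MR setup. Boiled down (want_pi stands in for the tpg attribute):

/* Hedged sketch of the negotiation above. */
conn->pi_support = want_pi && device->pi_capable;
return conn->pi_support ? TARGET_PROT_ALL : TARGET_PROT_NORMAL;
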
@@ -2250,9 +2165,9 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
&isert_cmd->tx_desc.iscsi_header,
nopout_response);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2268,9 +2183,9 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Logout Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2286,9 +2201,9 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2318,9 +2233,9 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Reject\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2358,9 +2273,9 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Text Reject\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2383,30 +2298,31 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
send_wr->sg_list = ib_sge;
send_wr->num_sge = sg_nents;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
/*
* Perform mapping of TCM scatterlist memory ib_sge dma_addr.
*/
for_each_sg(sg_start, tmp_sg, sg_nents, i) {
- pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
- (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length, page_off);
+ isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
+ "page_off: %u\n",
+ (unsigned long long)tmp_sg->dma_address,
+ tmp_sg->length, page_off);
ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
ib_sge->length = min_t(u32, data_left,
ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
ib_sge->lkey = isert_conn->conn_mr->lkey;
- pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
- ib_sge->addr, ib_sge->length, ib_sge->lkey);
+ isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
+ ib_sge->addr, ib_sge->length, ib_sge->lkey);
page_off = 0;
data_left -= ib_sge->length;
ib_sge++;
- pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
+ isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
}
- pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
- send_wr->sg_list, send_wr->num_sge);
+ isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
+ send_wr->sg_list, send_wr->num_sge);
return sg_nents;
}
@@ -2438,7 +2354,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
if (!ib_sge) {
- pr_warn("Unable to allocate ib_sge\n");
+ isert_warn("Unable to allocate ib_sge\n");
ret = -ENOMEM;
goto unmap_cmd;
}
@@ -2448,7 +2364,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
GFP_KERNEL);
if (!wr->send_wr) {
- pr_debug("Unable to allocate wr->send_wr\n");
+ isert_dbg("Unable to allocate wr->send_wr\n");
ret = -ENOMEM;
goto unmap_cmd;
}
@@ -2512,9 +2428,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
chunk_start = start_addr;
end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
- pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
- i, (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length);
+ isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
+ i, (unsigned long long)tmp_sg->dma_address,
+ tmp_sg->length);
if ((end_addr & ~PAGE_MASK) && i < last_ent) {
new_chunk = 0;
@@ -2525,8 +2441,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
page = chunk_start & PAGE_MASK;
do {
fr_pl[n_pages++] = page;
- pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
- n_pages - 1, page);
+ isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
+ n_pages - 1, page);
page += PAGE_SIZE;
} while (page < end_addr);
}
@@ -2534,6 +2450,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
return n_pages;
}
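+/*
+ * Build a LOCAL_INV work request for @mr and bump its rkey so the
+ * next fast registration uses a fresh key; ib_inc_rkey() advances
+ * only the low 8 "variant" bits of the key.
+ */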
+static inline void
+isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+{
+ u32 rkey;
+
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->ex.invalidate_rkey = mr->rkey;
+
+ /* Bump the key */
+ rkey = ib_inc_rkey(mr->rkey);
+ ib_update_fast_reg_key(mr, rkey);
+}
+
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
struct fast_reg_descriptor *fr_desc,
@@ -2548,15 +2479,13 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
struct ib_send_wr *bad_wr, *wr = NULL;
int ret, pagelist_len;
u32 page_off;
- u8 key;
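+ /*
+ * A single DMA entry needs no fast registration; reference it
+ * directly through the connection's local DMA lkey.
+ */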
if (mem->dma_nents == 1) {
sge->lkey = isert_conn->conn_mr->lkey;
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
- pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
- __func__, __LINE__, sge->addr, sge->length,
- sge->lkey);
+ isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
+ sge->addr, sge->length, sge->lkey);
return 0;
}
@@ -2572,21 +2501,15 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
page_off = mem->offset % PAGE_SIZE;
- pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
- fr_desc, mem->nents, mem->offset);
+ isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
+ fr_desc, mem->nents, mem->offset);
pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
&frpl->page_list[0]);
- if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.ex.invalidate_rkey = mr->rkey;
+ if (!(fr_desc->ind & ind)) {
+ isert_inv_rkey(&inv_wr, mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
@@ -2608,7 +2531,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
if (ret) {
- pr_err("fast registration failed, ret:%d\n", ret);
+ isert_err("fast registration failed, ret:%d\n", ret);
return ret;
}
fr_desc->ind &= ~ind;
@@ -2617,9 +2540,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
sge->addr = frpl->page_list[0] + page_off;
sge->length = mem->len;
- pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
- __func__, __LINE__, sge->addr, sge->length,
- sge->lkey);
+ isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
+ sge->addr, sge->length, sge->lkey);
return ret;
}
@@ -2665,7 +2587,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
break;
default:
- pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+ isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
return -EINVAL;
}
@@ -2681,17 +2603,16 @@ isert_set_prot_checks(u8 prot_checks)
}
static int
-isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
- struct fast_reg_descriptor *fr_desc,
- struct ib_sge *data_sge, struct ib_sge *prot_sge,
- struct ib_sge *sig_sge)
+isert_reg_sig_mr(struct isert_conn *isert_conn,
+ struct se_cmd *se_cmd,
+ struct isert_rdma_wr *rdma_wr,
+ struct fast_reg_descriptor *fr_desc)
{
struct ib_send_wr sig_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
struct pi_context *pi_ctx = fr_desc->pi_ctx;
struct ib_sig_attrs sig_attrs;
int ret;
- u32 key;
memset(&sig_attrs, 0, sizeof(sig_attrs));
ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
@@ -2701,26 +2622,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
}
memset(&sig_wr, 0, sizeof(sig_wr));
sig_wr.opcode = IB_WR_REG_SIG_MR;
sig_wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.sg_list = data_sge;
+ sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
sig_wr.num_sge = 1;
sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
if (se_cmd->t_prot_sg)
- sig_wr.wr.sig_handover.prot = prot_sge;
+ sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
if (!wr)
wr = &sig_wr;
@@ -2729,39 +2644,98 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
if (ret) {
- pr_err("fast registration failed, ret:%d\n", ret);
+ isert_err("fast registration failed, ret:%d\n", ret);
goto err;
}
fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
- sig_sge->lkey = pi_ctx->sig_mr->lkey;
- sig_sge->addr = 0;
- sig_sge->length = se_cmd->data_length;
+ rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
+ rdma_wr->ib_sg[SIG].addr = 0;
+ rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
/*
* We have protection guards on the wire
* so we need to set a larger transfer length
*/
- sig_sge->length += se_cmd->prot_length;
+ rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
- pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
- sig_sge->addr, sig_sge->length,
- sig_sge->lkey);
+ isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
+ rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
+ rdma_wr->ib_sg[SIG].lkey);
err:
return ret;
}
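+/*
+ * Set up memory registration for a T10-PI protected command: allocate
+ * the descriptor's pi_ctx on first use, fast-register the protection
+ * buffer when one was supplied, and post the signature MR registration
+ * that ties the data and protection domains together.
+ */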
static int
+isert_handle_prot_cmd(struct isert_conn *isert_conn,
+ struct isert_cmd *isert_cmd,
+ struct isert_rdma_wr *wr)
+{
+ struct isert_device *device = isert_conn->conn_device;
+ struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
+ int ret;
+
+ if (!wr->fr_desc->pi_ctx) {
+ ret = isert_create_pi_ctx(wr->fr_desc,
+ device->ib_device,
+ isert_conn->conn_pd);
+ if (ret) {
+ isert_err("conn %p failed to allocate pi_ctx\n",
+ isert_conn);
+ return ret;
+ }
+ }
+
+ if (se_cmd->t_prot_sg) {
+ ret = isert_map_data_buf(isert_conn, isert_cmd,
+ se_cmd->t_prot_sg,
+ se_cmd->t_prot_nents,
+ se_cmd->prot_length,
+ 0, wr->iser_ib_op, &wr->prot);
+ if (ret) {
+ isert_err("conn %p failed to map protection buffer\n",
+ isert_conn);
+ return ret;
+ }
+
+ memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
+ ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
+ ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
+ if (ret) {
+ isert_err("conn %p failed to fast reg mr\n",
+ isert_conn);
+ goto unmap_prot_cmd;
+ }
+ }
+
+ ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
+ if (ret) {
+ isert_err("conn %p failed to fast reg mr\n",
+ isert_conn);
+ goto unmap_prot_cmd;
+ }
+ wr->fr_desc->ind |= ISERT_PROTECTED;
+
+ return 0;
+
+unmap_prot_cmd:
+ if (se_cmd->t_prot_sg)
+ isert_unmap_data_buf(isert_conn, &wr->prot);
+
+ return ret;
+}
+
+static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct ib_sge data_sge;
- struct ib_send_wr *send_wr;
struct fast_reg_descriptor *fr_desc = NULL;
+ struct ib_send_wr *send_wr;
+ struct ib_sge *ib_sg;
u32 offset;
int ret = 0;
unsigned long flags;
@@ -2775,8 +2749,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (ret)
return ret;
- if (wr->data.dma_nents != 1 ||
- se_cmd->prot_op != TARGET_PROT_NORMAL) {
+ if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
struct fast_reg_descriptor, list);
@@ -2786,38 +2759,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
- ISERT_DATA_KEY_VALID, &data_sge);
+ ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
if (ret)
goto unmap_cmd;
- if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
- struct ib_sge prot_sge, sig_sge;
-
- if (se_cmd->t_prot_sg) {
- ret = isert_map_data_buf(isert_conn, isert_cmd,
- se_cmd->t_prot_sg,
- se_cmd->t_prot_nents,
- se_cmd->prot_length,
- 0, wr->iser_ib_op, &wr->prot);
- if (ret)
- goto unmap_cmd;
-
- ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
- ISERT_PROT_KEY_VALID, &prot_sge);
- if (ret)
- goto unmap_prot_cmd;
- }
-
- ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
- &data_sge, &prot_sge, &sig_sge);
+ if (isert_prot_cmd(isert_conn, se_cmd)) {
+ ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
if (ret)
- goto unmap_prot_cmd;
+ goto unmap_cmd;
- fr_desc->ind |= ISERT_PROTECTED;
- memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
- } else
- memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+ ib_sg = &wr->ib_sg[SIG];
+ } else {
+ ib_sg = &wr->ib_sg[DATA];
+ }
+ memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
wr->ib_sge = &wr->s_ib_sge;
wr->send_wr_num = 1;
memset(&wr->s_send_wr, 0, sizeof(*send_wr));
@@ -2827,12 +2783,12 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
send_wr = &isert_cmd->rdma_wr.s_send_wr;
send_wr->sg_list = &wr->s_ib_sge;
send_wr->num_sge = 1;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
send_wr->opcode = IB_WR_RDMA_WRITE;
send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
send_wr->wr.rdma.rkey = isert_cmd->read_stag;
- send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+ send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
0 : IB_SEND_SIGNALED;
} else {
send_wr->opcode = IB_WR_RDMA_READ;
@@ -2842,9 +2798,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
return 0;
-unmap_prot_cmd:
- if (se_cmd->t_prot_sg)
- isert_unmap_data_buf(isert_conn, &wr->prot);
+
unmap_cmd:
if (fr_desc) {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
@@ -2867,16 +2821,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct ib_send_wr *wr_failed;
int rc;
- pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
+ isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
+
wr->iser_ib_op = ISER_IB_RDMA_WRITE;
rc = device->reg_rdma_mem(conn, cmd, wr);
if (rc) {
- pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+ isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
}
- if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+ if (!isert_prot_cmd(isert_conn, se_cmd)) {
/*
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
@@ -2886,24 +2841,20 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
- &isert_cmd->tx_desc.send_wr, false);
+ &isert_cmd->tx_desc.send_wr);
isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
wr->send_wr_num += 1;
}
- atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
- if (rc) {
- pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- }
+ if (rc)
+ isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- if (se_cmd->prot_op == TARGET_PROT_NORMAL)
- pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+ if (!isert_prot_cmd(isert_conn, se_cmd))
+ isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
"READ\n", isert_cmd);
else
- pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
isert_cmd);
return 1;
@@ -2920,23 +2871,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
struct ib_send_wr *wr_failed;
int rc;
- pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+ isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
isert_cmd, se_cmd->data_length, cmd->write_data_done);
wr->iser_ib_op = ISER_IB_RDMA_READ;
rc = device->reg_rdma_mem(conn, cmd, wr);
if (rc) {
- pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+ isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
}
- atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
- if (rc) {
- pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- }
- pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+ if (rc)
+ isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+
+ isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
return 0;
@@ -2952,7 +2900,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
ret = isert_put_nopin(cmd, conn, false);
break;
default:
- pr_err("Unknown immediate state: 0x%02x\n", state);
+ isert_err("Unknown immediate state: 0x%02x\n", state);
ret = -EINVAL;
break;
}
@@ -2963,15 +2911,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
+ struct isert_conn *isert_conn = conn->context;
int ret;
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
ret = isert_put_logout_rsp(cmd, conn);
- if (!ret) {
- pr_debug("Returning iSER Logout -EAGAIN\n");
- ret = -EAGAIN;
- }
+ if (!ret)
+ isert_conn->logout_posted = true;
break;
case ISTATE_SEND_NOPIN:
ret = isert_put_nopin(cmd, conn, true);
@@ -2993,7 +2940,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
ret = isert_put_response(conn, cmd);
break;
default:
- pr_err("Unknown response state: 0x%02x\n", state);
+ isert_err("Unknown response state: 0x%02x\n", state);
ret = -EINVAL;
break;
}
@@ -3001,27 +2948,64 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
return ret;
}
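+/*
+ * Create, bind and listen on an RDMA CM ID for this network portal;
+ * the isert_np is stored as the CM ID context for isert_cma_handler().
+ */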
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+ struct iscsi_np *np = isert_np->np;
+ struct rdma_cm_id *id;
+ struct sockaddr *sa;
+ int ret;
+
+ sa = (struct sockaddr *)&np->np_sockaddr;
+ isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+ id = rdma_create_id(isert_cma_handler, isert_np,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(id)) {
+ isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+ ret = PTR_ERR(id);
+ goto out;
+ }
+ isert_dbg("id %p context %p\n", id, id->context);
+
+ ret = rdma_bind_addr(id, sa);
+ if (ret) {
+ isert_err("rdma_bind_addr() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+ if (ret) {
+ isert_err("rdma_listen() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ return id;
+out_id:
+ rdma_destroy_id(id);
+out:
+ return ERR_PTR(ret);
+}
+
static int
isert_setup_np(struct iscsi_np *np,
struct __kernel_sockaddr_storage *ksockaddr)
{
struct isert_np *isert_np;
struct rdma_cm_id *isert_lid;
- struct sockaddr *sa;
int ret;
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
if (!isert_np) {
- pr_err("Unable to allocate struct isert_np\n");
+ isert_err("Unable to allocate struct isert_np\n");
return -ENOMEM;
}
sema_init(&isert_np->np_sem, 0);
mutex_init(&isert_np->np_accept_mutex);
INIT_LIST_HEAD(&isert_np->np_accept_list);
init_completion(&isert_np->np_login_comp);
+ isert_np->np = np;
- sa = (struct sockaddr *)ksockaddr;
- pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
/*
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
@@ -3029,37 +3013,20 @@ isert_setup_np(struct iscsi_np *np,
memcpy(&np->np_sockaddr, ksockaddr,
sizeof(struct __kernel_sockaddr_storage));
- isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
- IB_QPT_RC);
+ isert_lid = isert_setup_id(isert_np);
if (IS_ERR(isert_lid)) {
- pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
- PTR_ERR(isert_lid));
ret = PTR_ERR(isert_lid);
goto out;
}
- ret = rdma_bind_addr(isert_lid, sa);
- if (ret) {
- pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
- goto out_lid;
- }
-
- ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
- if (ret) {
- pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
- goto out_lid;
- }
-
isert_np->np_cm_id = isert_lid;
np->np_context = isert_np;
- pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
return 0;
-out_lid:
- rdma_destroy_id(isert_lid);
out:
kfree(isert_np);
+
return ret;
}
@@ -3075,16 +3042,12 @@ isert_rdma_accept(struct isert_conn *isert_conn)
cp.retry_count = 7;
cp.rnr_retry_count = 7;
- pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
-
ret = rdma_accept(cm_id, &cp);
if (ret) {
- pr_err("rdma_accept() failed with: %d\n", ret);
+ isert_err("rdma_accept() failed with: %d\n", ret);
return ret;
}
- pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
-
return 0;
}
@@ -3094,7 +3057,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
int ret;
- pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+ isert_info("before login_req comp conn: %p\n", isert_conn);
+ ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+ if (ret) {
+ isert_err("isert_conn %p interrupted before got login req\n",
+ isert_conn);
+ return ret;
+ }
+ reinit_completion(&isert_conn->login_req_comp);
+
/*
* For login requests after the first PDU, isert_rx_login_req() will
* kick schedule_delayed_work(&conn->login_work) as the packet is
@@ -3104,11 +3075,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
if (!login->first_request)
return 0;
+ isert_rx_login_req(isert_conn);
+
+ isert_info("before conn_login_comp conn: %p\n", conn);
ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
if (ret)
return ret;
- pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+ isert_info("processing login->req: %p\n", login->req);
+
return 0;
}
@@ -3161,7 +3136,7 @@ accept_wait:
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("np_thread_state %d for isert_accept_np\n",
+ isert_dbg("np_thread_state %d for isert_accept_np\n",
np->np_thread_state);
/**
* No point in stalling here when np_thread
@@ -3186,17 +3161,10 @@ accept_wait:
isert_conn->conn = conn;
max_accept = 0;
- ret = isert_rdma_post_recvl(isert_conn);
- if (ret)
- return ret;
-
- ret = isert_rdma_accept(isert_conn);
- if (ret)
- return ret;
-
isert_set_conn_info(np, conn, isert_conn);
- pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+ isert_dbg("Processing isert_conn: %p\n", isert_conn);
+
return 0;
}
@@ -3204,25 +3172,103 @@ static void
isert_free_np(struct iscsi_np *np)
{
struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_conn *isert_conn, *n;
if (isert_np->np_cm_id)
rdma_destroy_id(isert_np->np_cm_id);
+ /*
+ * FIXME: At this point we have no good way to ensure that there
+ * are no hanging connections that completed RDMA establishment
+ * but never started the iscsi login process. Work around this
+ * by cleaning up whatever piled up in np_accept_list.
+ */
+ mutex_lock(&isert_np->np_accept_mutex);
+ if (!list_empty(&isert_np->np_accept_list)) {
+ isert_info("Still have isert connections, cleaning up...\n");
+ list_for_each_entry_safe(isert_conn, n,
+ &isert_np->np_accept_list,
+ conn_accept_node) {
+ isert_info("cleaning isert_conn %p state (%d)\n",
+ isert_conn, isert_conn->state);
+ isert_connect_release(isert_conn);
+ }
+ }
+ mutex_unlock(&isert_np->np_accept_mutex);
+
np->np_context = NULL;
kfree(isert_np);
}
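+/*
+ * Deferred connection release, run from isert_release_wq: wait for
+ * connection termination to complete, mark the connection DOWN and
+ * drop the final reference.
+ */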
+static void isert_release_work(struct work_struct *work)
+{
+ struct isert_conn *isert_conn = container_of(work,
+ struct isert_conn,
+ release_work);
+
+ isert_info("Starting release conn %p\n", isert_conn);
+
+ wait_for_completion(&isert_conn->conn_wait);
+
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ isert_info("Destroying conn %p\n", isert_conn);
+ isert_put_conn(isert_conn);
+}
+
+static void
+isert_wait4logout(struct isert_conn *isert_conn)
+{
+ struct iscsi_conn *conn = isert_conn->conn;
+
+ isert_info("conn %p\n", isert_conn);
+
+ if (isert_conn->logout_posted) {
+ isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP * HZ);
+ }
+}
+
+static void
+isert_wait4cmds(struct iscsi_conn *conn)
+{
+ isert_info("iscsi_conn %p\n", conn);
+
+ if (conn->sess) {
+ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_wait_for_sess_cmds(conn->sess->se_sess);
+ }
+}
+
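+/*
+ * Drain the QP of flush errors: post a "beacon" receive with a
+ * reserved wr_id and wait until the completion handler signals that
+ * it has been consumed.
+ */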
+static void
+isert_wait4flush(struct isert_conn *isert_conn)
+{
+ struct ib_recv_wr *bad_wr;
+
+ isert_info("conn %p\n", isert_conn);
+
+ init_completion(&isert_conn->conn_wait_comp_err);
+ isert_conn->beacon.wr_id = ISER_BEACON_WRID;
+ /* post an indication that all flush errors were consumed */
+ if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
+ isert_err("conn %p failed to post beacon", isert_conn);
+ return;
+ }
+
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+}
+
static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- pr_debug("isert_wait_conn: Starting \n");
+ isert_info("Starting conn %p\n", isert_conn);
mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
- pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
- rdma_disconnect(isert_conn->conn_cm_id);
- }
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
@@ -3231,14 +3277,15 @@ static void isert_wait_conn(struct iscsi_conn *conn)
mutex_unlock(&isert_conn->conn_mutex);
return;
}
- if (isert_conn->state == ISER_CONN_UP)
- isert_conn->state = ISER_CONN_TERMINATING;
+ isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->conn_mutex);
- wait_for_completion(&isert_conn->conn_wait_comp_err);
+ isert_wait4cmds(conn);
+ isert_wait4flush(isert_conn);
+ isert_wait4logout(isert_conn);
- wait_for_completion(&isert_conn->conn_wait);
- isert_put_conn(isert_conn);
+ INIT_WORK(&isert_conn->release_work, isert_release_work);
+ queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
@@ -3273,35 +3320,39 @@ static int __init isert_init(void)
{
int ret;
- isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
- if (!isert_rx_wq) {
- pr_err("Unable to allocate isert_rx_wq\n");
+ isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+ if (!isert_comp_wq) {
+ isert_err("Unable to allocate isert_comp_wq\n");
return -ENOMEM;
}
- isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
- if (!isert_comp_wq) {
- pr_err("Unable to allocate isert_comp_wq\n");
+ isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!isert_release_wq) {
+ isert_err("Unable to allocate isert_release_wq\n");
ret = -ENOMEM;
- goto destroy_rx_wq;
+ goto destroy_comp_wq;
}
iscsit_register_transport(&iser_target_transport);
- pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+ isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+
return 0;
-destroy_rx_wq:
- destroy_workqueue(isert_rx_wq);
+destroy_comp_wq:
+ destroy_workqueue(isert_comp_wq);
+
return ret;
}
static void __exit isert_exit(void)
{
flush_scheduled_work();
+ destroy_workqueue(isert_release_wq);
destroy_workqueue(isert_comp_wq);
- destroy_workqueue(isert_rx_wq);
iscsit_unregister_transport(&iser_target_transport);
- pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
+ isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 04f51f7bf614..8dc8415d152d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -4,9 +4,37 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+#define DRV_NAME "isert"
+#define PFX DRV_NAME ": "
+
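+/*
+ * Logging helpers gated by the isert_debug_level module parameter
+ * (assumed to be defined in ib_isert.c): isert_err() always prints,
+ * isert_warn() needs level > 0, isert_info() level > 1 and
+ * isert_dbg() level > 2.
+ */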
+#define isert_dbg(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 2)) \
+ printk(KERN_DEBUG PFX "%s: " fmt,\
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_warn(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 0)) \
+ pr_warn(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_info(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 1)) \
+ pr_info(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_err(fmt, arg...) \
+ pr_err(PFX "%s: " fmt, __func__ , ## arg)
+
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
+#define ISER_BEACON_WRID 0xfffffffffffffffeULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -23,6 +51,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
};
@@ -44,9 +73,6 @@ struct iser_tx_desc {
struct ib_sge tx_sg[2];
int num_sge;
struct isert_cmd *isert_cmd;
- struct llist_node *comp_llnode_batch;
- struct llist_node comp_llnode;
- bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@@ -81,6 +107,12 @@ struct isert_data_buf {
enum dma_data_direction dma_dir;
};
+enum {
+ DATA = 0,
+ PROT = 1,
+ SIG = 2,
+};
+
struct isert_rdma_wr {
struct list_head wr_list;
struct isert_cmd *isert_cmd;
@@ -90,6 +122,7 @@ struct isert_rdma_wr {
int send_wr_num;
struct ib_send_wr *send_wr;
struct ib_send_wr s_send_wr;
+ struct ib_sge ib_sg[3];
struct isert_data_buf data;
struct isert_data_buf prot;
struct fast_reg_descriptor *fr_desc;
@@ -117,14 +150,15 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
int post_recv_buf_count;
- atomic_t post_send_buf_count;
u32 responder_resources;
u32 initiator_depth;
+ bool pi_support;
u32 max_sge;
char *login_buf;
char *login_req_buf;
char *login_rsp_buf;
u64 login_req_dma;
+ int login_req_len;
u64 login_rsp_dma;
unsigned int conn_rx_desc_head;
struct iser_rx_desc *conn_rx_descs;
@@ -132,13 +166,13 @@ struct isert_conn {
struct iscsi_conn *conn;
struct list_head conn_accept_node;
struct completion conn_login_comp;
+ struct completion login_req_comp;
struct iser_tx_desc conn_login_tx_desc;
struct rdma_cm_id *conn_cm_id;
struct ib_pd *conn_pd;
struct ib_mr *conn_mr;
struct ib_qp *conn_qp;
struct isert_device *conn_device;
- struct work_struct conn_logout_work;
struct mutex conn_mutex;
struct completion conn_wait;
struct completion conn_wait_comp_err;
@@ -147,31 +181,38 @@ struct isert_conn {
int conn_fr_pool_size;
/* lock to protect fastreg pool */
spinlock_t conn_lock;
-#define ISERT_COMP_BATCH_COUNT 8
- int conn_comp_batch;
- struct llist_head conn_comp_llist;
- bool disconnect;
+ struct work_struct release_work;
+ struct ib_recv_wr beacon;
+ bool logout_posted;
};
#define ISERT_MAX_CQ 64
-struct isert_cq_desc {
- struct isert_device *device;
- int cq_index;
- struct work_struct cq_rx_work;
- struct work_struct cq_tx_work;
+/**
+ * struct isert_comp - iSER completion context
+ *
+ * @device: pointer to device handle
+ * @cq: completion queue
+ * @wcs: work completion array
+ * @active_qps: Number of active QPs attached
+ * to completion context
+ * @work: completion work handle
+ */
+struct isert_comp {
+ struct isert_device *device;
+ struct ib_cq *cq;
+ struct ib_wc wcs[16];
+ int active_qps;
+ struct work_struct work;
};
struct isert_device {
int use_fastreg;
bool pi_capable;
- int cqs_used;
int refcount;
- int cq_active_qps[ISERT_MAX_CQ];
struct ib_device *ib_device;
- struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
- struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
- struct isert_cq_desc *cq_desc;
+ struct isert_comp *comps;
+ int comps_used;
struct list_head dev_node;
struct ib_device_attr dev_attr;
int (*reg_rdma_mem)(struct iscsi_conn *conn,
@@ -182,6 +223,7 @@ struct isert_device {
};
struct isert_np {
+ struct iscsi_np *np;
struct semaphore np_sem;
struct rdma_cm_id *np_cm_id;
struct mutex np_accept_mutex;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5461924c9f10..0747c0595a9d 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2740,7 +2740,6 @@ static struct scsi_host_template srp_template = {
.info = srp_target_info,
.queuecommand = srp_queuecommand,
.change_queue_depth = srp_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
@@ -2929,7 +2928,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
return -ENOMEM;
sep_opt = options;
- while ((p = strsep(&sep_opt, ",")) != NULL) {
+ while ((p = strsep(&sep_opt, ",\n")) != NULL) {
if (!*p)
continue;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index dc829682701a..eb694ddad79f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1708,17 +1708,17 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
switch (srp_cmd->task_attr) {
case SRP_CMD_SIMPLE_Q:
- cmd->sam_task_attr = MSG_SIMPLE_TAG;
+ cmd->sam_task_attr = TCM_SIMPLE_TAG;
break;
case SRP_CMD_ORDERED_Q:
default:
- cmd->sam_task_attr = MSG_ORDERED_TAG;
+ cmd->sam_task_attr = TCM_ORDERED_TAG;
break;
case SRP_CMD_HEAD_OF_Q:
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = TCM_HEAD_TAG;
break;
case SRP_CMD_ACA:
- cmd->sam_task_attr = MSG_ACA_TAG;
+ cmd->sam_task_attr = TCM_ACA_TAG;
break;
}
@@ -1733,7 +1733,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
sizeof(srp_cmd->lun));
rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
&send_ioctx->sense_data[0], unpacked_lun, data_len,
- MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
if (rc != 0) {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto send_sense;
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index e29c04e2aff4..e853a2134680 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
*/
static void gameport_init_port(struct gameport *gameport)
{
- static atomic_t gameport_no = ATOMIC_INIT(0);
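+ /* Start at -1 so the first atomic_inc_return() yields 0. */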
+ static atomic_t gameport_no = ATOMIC_INIT(-1);
__module_get(THIS_MODULE);
mutex_init(&gameport->drv_mutex);
device_initialize(&gameport->dev);
dev_set_name(&gameport->dev, "gameport%lu",
- (unsigned long)atomic_inc_return(&gameport_no) - 1);
+ (unsigned long)atomic_inc_return(&gameport_no));
gameport->dev.bus = &gameport_bus;
gameport->dev.release = gameport_release_port;
if (gameport->parent)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 0f175f55782b..04217c2e345c 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
*/
struct input_dev *input_allocate_device(void)
{
- static atomic_t input_no = ATOMIC_INIT(0);
+ static atomic_t input_no = ATOMIC_INIT(-1);
struct input_dev *dev;
dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
INIT_LIST_HEAD(&dev->node);
dev_set_name(&dev->dev, "input%lu",
- (unsigned long) atomic_inc_return(&input_no) - 1);
+ (unsigned long)atomic_inc_return(&input_no));
__module_get(THIS_MODULE);
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fc55f0d15b70..3aa2f3f3da5b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -886,8 +886,8 @@ static void xpad_led_set(struct led_classdev *led_cdev,
static int xpad_led_probe(struct usb_xpad *xpad)
{
- static atomic_t led_seq = ATOMIC_INIT(0);
- long led_no;
+ static atomic_t led_seq = ATOMIC_INIT(-1);
+ unsigned long led_no;
struct xpad_led *led;
struct led_classdev *led_cdev;
int error;
@@ -899,9 +899,9 @@ static int xpad_led_probe(struct usb_xpad *xpad)
if (!led)
return -ENOMEM;
- led_no = (long)atomic_inc_return(&led_seq) - 1;
+ led_no = atomic_inc_return(&led_seq);
- snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
+ snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
led->xpad = xpad;
led_cdev = &led->led_cdev;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a3958c63d7d5..96ee26c555e0 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -665,14 +665,14 @@ config KEYBOARD_CROS_EC
To compile this driver as a module, choose M here: the
module will be called cros_ec_keyb.
-config KEYBOARD_CAP1106
- tristate "Microchip CAP1106 touch sensor"
+config KEYBOARD_CAP11XX
+ tristate "Microchip CAP11XX based touch sensors"
depends on OF && I2C
select REGMAP_I2C
help
- Say Y here to enable the CAP1106 touch sensor driver.
+ Say Y here to enable the CAP11XX touch sensor driver.
To compile this driver as a module, choose M here: the
- module will be called cap1106.
+ module will be called cap11xx.
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 0a3345634d79..febafa527eb6 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
-obj-$(CONFIG_KEYBOARD_CAP1106) += cap1106.o
+obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o
obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index d3b8c58fcfdb..e04a3b4e55d6 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -45,6 +45,7 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga keyboard driver");
MODULE_LICENSE("GPL");
+#ifdef CONFIG_HW_CONSOLE
static unsigned char amikbd_keycode[0x78] __initdata = {
[0] = KEY_GRAVE,
[1] = KEY_1,
@@ -144,6 +145,32 @@ static unsigned char amikbd_keycode[0x78] __initdata = {
[103] = KEY_RIGHTMETA
};
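+/*
+ * Translate the console keymaps from Amiga raw scancodes to the Linux
+ * keycode layout; untranslated slots are filled with 0xf200 so the
+ * console treats them as unassigned.
+ */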
+static void __init amikbd_init_console_keymaps(void)
+{
+ /* We can spare 512 bytes on stack for temp_map in init path. */
+ unsigned short temp_map[NR_KEYS];
+ int i, j;
+
+ for (i = 0; i < MAX_NR_KEYMAPS; i++) {
+ if (!key_maps[i])
+ continue;
+ memset(temp_map, 0, sizeof(temp_map));
+ for (j = 0; j < 0x78; j++) {
+ if (!amikbd_keycode[j])
+ continue;
+ temp_map[j] = key_maps[i][amikbd_keycode[j]];
+ }
+ for (j = 0; j < NR_KEYS; j++) {
+ if (!temp_map[j])
+ temp_map[j] = 0xf200;
+ }
+ memcpy(key_maps[i], temp_map, sizeof(temp_map));
+ }
+}
+#else /* !CONFIG_HW_CONSOLE */
+static inline void amikbd_init_console_keymaps(void) {}
+#endif /* !CONFIG_HW_CONSOLE */
+
static const char *amikbd_messages[8] = {
[0] = KERN_ALERT "amikbd: Ctrl-Amiga-Amiga reset warning!!\n",
[1] = KERN_WARNING "amikbd: keyboard lost sync\n",
@@ -186,7 +213,7 @@ static irqreturn_t amikbd_interrupt(int irq, void *data)
static int __init amikbd_probe(struct platform_device *pdev)
{
struct input_dev *dev;
- int i, j, err;
+ int i, err;
dev = input_allocate_device();
if (!dev) {
@@ -207,22 +234,8 @@ static int __init amikbd_probe(struct platform_device *pdev)
for (i = 0; i < 0x78; i++)
set_bit(i, dev->keybit);
- for (i = 0; i < MAX_NR_KEYMAPS; i++) {
- static u_short temp_map[NR_KEYS] __initdata;
- if (!key_maps[i])
- continue;
- memset(temp_map, 0, sizeof(temp_map));
- for (j = 0; j < 0x78; j++) {
- if (!amikbd_keycode[j])
- continue;
- temp_map[j] = key_maps[i][amikbd_keycode[j]];
- }
- for (j = 0; j < NR_KEYS; j++) {
- if (!temp_map[j])
- temp_map[j] = 0xf200;
- }
- memcpy(key_maps[i], temp_map, sizeof(temp_map));
- }
+ amikbd_init_console_keymaps();
+
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
dev);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6f5d79569136..e27a25892db4 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -456,8 +456,9 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
keycode = atkbd->keycode[code];
- if (keycode != ATKBD_KEY_NULL)
- input_event(dev, EV_MSC, MSC_SCAN, code);
+ if (!(atkbd->release && test_bit(code, atkbd->force_release_mask)))
+ if (keycode != ATKBD_KEY_NULL)
+ input_event(dev, EV_MSC, MSC_SCAN, code);
switch (keycode) {
case ATKBD_KEY_NULL:
@@ -511,6 +512,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
input_sync(dev);
if (value && test_bit(code, atkbd->force_release_mask)) {
+ input_event(dev, EV_MSC, MSC_SCAN, code);
input_report_key(dev, keycode, 0);
input_sync(dev);
}
diff --git a/drivers/input/keyboard/cap1106.c b/drivers/input/keyboard/cap1106.c
deleted file mode 100644
index d70b65a14ced..000000000000
--- a/drivers/input/keyboard/cap1106.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Input driver for Microchip CAP1106, 6 channel capacitive touch sensor
- *
- * http://www.microchip.com/wwwproducts/Devices.aspx?product=CAP1106
- *
- * (c) 2014 Daniel Mack <linux@zonque.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/of_irq.h>
-#include <linux/regmap.h>
-#include <linux/i2c.h>
-#include <linux/gpio/consumer.h>
-
-#define CAP1106_REG_MAIN_CONTROL 0x00
-#define CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT (6)
-#define CAP1106_REG_MAIN_CONTROL_GAIN_MASK (0xc0)
-#define CAP1106_REG_MAIN_CONTROL_DLSEEP BIT(4)
-#define CAP1106_REG_GENERAL_STATUS 0x02
-#define CAP1106_REG_SENSOR_INPUT 0x03
-#define CAP1106_REG_NOISE_FLAG_STATUS 0x0a
-#define CAP1106_REG_SENOR_DELTA(X) (0x10 + (X))
-#define CAP1106_REG_SENSITIVITY_CONTROL 0x1f
-#define CAP1106_REG_CONFIG 0x20
-#define CAP1106_REG_SENSOR_ENABLE 0x21
-#define CAP1106_REG_SENSOR_CONFIG 0x22
-#define CAP1106_REG_SENSOR_CONFIG2 0x23
-#define CAP1106_REG_SAMPLING_CONFIG 0x24
-#define CAP1106_REG_CALIBRATION 0x26
-#define CAP1106_REG_INT_ENABLE 0x27
-#define CAP1106_REG_REPEAT_RATE 0x28
-#define CAP1106_REG_MT_CONFIG 0x2a
-#define CAP1106_REG_MT_PATTERN_CONFIG 0x2b
-#define CAP1106_REG_MT_PATTERN 0x2d
-#define CAP1106_REG_RECALIB_CONFIG 0x2f
-#define CAP1106_REG_SENSOR_THRESH(X) (0x30 + (X))
-#define CAP1106_REG_SENSOR_NOISE_THRESH 0x38
-#define CAP1106_REG_STANDBY_CHANNEL 0x40
-#define CAP1106_REG_STANDBY_CONFIG 0x41
-#define CAP1106_REG_STANDBY_SENSITIVITY 0x42
-#define CAP1106_REG_STANDBY_THRESH 0x43
-#define CAP1106_REG_CONFIG2 0x44
-#define CAP1106_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
-#define CAP1106_REG_SENSOR_CALIB (0xb1 + (X))
-#define CAP1106_REG_SENSOR_CALIB_LSB1 0xb9
-#define CAP1106_REG_SENSOR_CALIB_LSB2 0xba
-#define CAP1106_REG_PRODUCT_ID 0xfd
-#define CAP1106_REG_MANUFACTURER_ID 0xfe
-#define CAP1106_REG_REVISION 0xff
-
-#define CAP1106_NUM_CHN 6
-#define CAP1106_PRODUCT_ID 0x55
-#define CAP1106_MANUFACTURER_ID 0x5d
-
-struct cap1106_priv {
- struct regmap *regmap;
- struct input_dev *idev;
-
- /* config */
- unsigned short keycodes[CAP1106_NUM_CHN];
-};
-
-static const struct reg_default cap1106_reg_defaults[] = {
- { CAP1106_REG_MAIN_CONTROL, 0x00 },
- { CAP1106_REG_GENERAL_STATUS, 0x00 },
- { CAP1106_REG_SENSOR_INPUT, 0x00 },
- { CAP1106_REG_NOISE_FLAG_STATUS, 0x00 },
- { CAP1106_REG_SENSITIVITY_CONTROL, 0x2f },
- { CAP1106_REG_CONFIG, 0x20 },
- { CAP1106_REG_SENSOR_ENABLE, 0x3f },
- { CAP1106_REG_SENSOR_CONFIG, 0xa4 },
- { CAP1106_REG_SENSOR_CONFIG2, 0x07 },
- { CAP1106_REG_SAMPLING_CONFIG, 0x39 },
- { CAP1106_REG_CALIBRATION, 0x00 },
- { CAP1106_REG_INT_ENABLE, 0x3f },
- { CAP1106_REG_REPEAT_RATE, 0x3f },
- { CAP1106_REG_MT_CONFIG, 0x80 },
- { CAP1106_REG_MT_PATTERN_CONFIG, 0x00 },
- { CAP1106_REG_MT_PATTERN, 0x3f },
- { CAP1106_REG_RECALIB_CONFIG, 0x8a },
- { CAP1106_REG_SENSOR_THRESH(0), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(1), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(2), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(3), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(4), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(5), 0x40 },
- { CAP1106_REG_SENSOR_NOISE_THRESH, 0x01 },
- { CAP1106_REG_STANDBY_CHANNEL, 0x00 },
- { CAP1106_REG_STANDBY_CONFIG, 0x39 },
- { CAP1106_REG_STANDBY_SENSITIVITY, 0x02 },
- { CAP1106_REG_STANDBY_THRESH, 0x40 },
- { CAP1106_REG_CONFIG2, 0x40 },
- { CAP1106_REG_SENSOR_CALIB_LSB1, 0x00 },
- { CAP1106_REG_SENSOR_CALIB_LSB2, 0x00 },
-};
-
-static bool cap1106_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case CAP1106_REG_MAIN_CONTROL:
- case CAP1106_REG_SENSOR_INPUT:
- case CAP1106_REG_SENOR_DELTA(0):
- case CAP1106_REG_SENOR_DELTA(1):
- case CAP1106_REG_SENOR_DELTA(2):
- case CAP1106_REG_SENOR_DELTA(3):
- case CAP1106_REG_SENOR_DELTA(4):
- case CAP1106_REG_SENOR_DELTA(5):
- case CAP1106_REG_PRODUCT_ID:
- case CAP1106_REG_MANUFACTURER_ID:
- case CAP1106_REG_REVISION:
- return true;
- }
-
- return false;
-}
-
-static const struct regmap_config cap1106_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = CAP1106_REG_REVISION,
- .reg_defaults = cap1106_reg_defaults,
-
- .num_reg_defaults = ARRAY_SIZE(cap1106_reg_defaults),
- .cache_type = REGCACHE_RBTREE,
- .volatile_reg = cap1106_volatile_reg,
-};
-
-static irqreturn_t cap1106_thread_func(int irq_num, void *data)
-{
- struct cap1106_priv *priv = data;
- unsigned int status;
- int ret, i;
-
- /*
- * Deassert interrupt. This needs to be done before reading the status
- * registers, which will not carry valid values otherwise.
- */
- ret = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL, 1, 0);
- if (ret < 0)
- goto out;
-
- ret = regmap_read(priv->regmap, CAP1106_REG_SENSOR_INPUT, &status);
- if (ret < 0)
- goto out;
-
- for (i = 0; i < CAP1106_NUM_CHN; i++)
- input_report_key(priv->idev, priv->keycodes[i],
- status & (1 << i));
-
- input_sync(priv->idev);
-
-out:
- return IRQ_HANDLED;
-}
-
-static int cap1106_set_sleep(struct cap1106_priv *priv, bool sleep)
-{
- return regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL,
- CAP1106_REG_MAIN_CONTROL_DLSEEP,
- sleep ? CAP1106_REG_MAIN_CONTROL_DLSEEP : 0);
-}
-
-static int cap1106_input_open(struct input_dev *idev)
-{
- struct cap1106_priv *priv = input_get_drvdata(idev);
-
- return cap1106_set_sleep(priv, false);
-}
-
-static void cap1106_input_close(struct input_dev *idev)
-{
- struct cap1106_priv *priv = input_get_drvdata(idev);
-
- cap1106_set_sleep(priv, true);
-}
-
-static int cap1106_i2c_probe(struct i2c_client *i2c_client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &i2c_client->dev;
- struct cap1106_priv *priv;
- struct device_node *node;
- int i, error, irq, gain = 0;
- unsigned int val, rev;
- u32 gain32, keycodes[CAP1106_NUM_CHN];
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->regmap = devm_regmap_init_i2c(i2c_client, &cap1106_regmap_config);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
-
- error = regmap_read(priv->regmap, CAP1106_REG_PRODUCT_ID, &val);
- if (error)
- return error;
-
- if (val != CAP1106_PRODUCT_ID) {
- dev_err(dev, "Product ID: Got 0x%02x, expected 0x%02x\n",
- val, CAP1106_PRODUCT_ID);
- return -ENODEV;
- }
-
- error = regmap_read(priv->regmap, CAP1106_REG_MANUFACTURER_ID, &val);
- if (error)
- return error;
-
- if (val != CAP1106_MANUFACTURER_ID) {
- dev_err(dev, "Manufacturer ID: Got 0x%02x, expected 0x%02x\n",
- val, CAP1106_MANUFACTURER_ID);
- return -ENODEV;
- }
-
- error = regmap_read(priv->regmap, CAP1106_REG_REVISION, &rev);
- if (error < 0)
- return error;
-
- dev_info(dev, "CAP1106 detected, revision 0x%02x\n", rev);
- i2c_set_clientdata(i2c_client, priv);
- node = dev->of_node;
-
- if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
- if (is_power_of_2(gain32) && gain32 <= 8)
- gain = ilog2(gain32);
- else
- dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
- }
-
- BUILD_BUG_ON(ARRAY_SIZE(keycodes) != ARRAY_SIZE(priv->keycodes));
-
- /* Provide some useful defaults */
- for (i = 0; i < ARRAY_SIZE(keycodes); i++)
- keycodes[i] = KEY_A + i;
-
- of_property_read_u32_array(node, "linux,keycodes",
- keycodes, ARRAY_SIZE(keycodes));
-
- for (i = 0; i < ARRAY_SIZE(keycodes); i++)
- priv->keycodes[i] = keycodes[i];
-
- error = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL,
- CAP1106_REG_MAIN_CONTROL_GAIN_MASK,
- gain << CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT);
- if (error)
- return error;
-
- /* Disable autorepeat. The Linux input system has its own handling. */
- error = regmap_write(priv->regmap, CAP1106_REG_REPEAT_RATE, 0);
- if (error)
- return error;
-
- priv->idev = devm_input_allocate_device(dev);
- if (!priv->idev)
- return -ENOMEM;
-
- priv->idev->name = "CAP1106 capacitive touch sensor";
- priv->idev->id.bustype = BUS_I2C;
- priv->idev->evbit[0] = BIT_MASK(EV_KEY);
-
- if (of_property_read_bool(node, "autorepeat"))
- __set_bit(EV_REP, priv->idev->evbit);
-
- for (i = 0; i < CAP1106_NUM_CHN; i++)
- __set_bit(priv->keycodes[i], priv->idev->keybit);
-
- __clear_bit(KEY_RESERVED, priv->idev->keybit);
-
- priv->idev->keycode = priv->keycodes;
- priv->idev->keycodesize = sizeof(priv->keycodes[0]);
- priv->idev->keycodemax = ARRAY_SIZE(priv->keycodes);
-
- priv->idev->id.vendor = CAP1106_MANUFACTURER_ID;
- priv->idev->id.product = CAP1106_PRODUCT_ID;
- priv->idev->id.version = rev;
-
- priv->idev->open = cap1106_input_open;
- priv->idev->close = cap1106_input_close;
-
- input_set_drvdata(priv->idev, priv);
-
- /*
- * Put the device in deep sleep mode for now.
- * ->open() will bring it back once the it is actually needed.
- */
- cap1106_set_sleep(priv, true);
-
- error = input_register_device(priv->idev);
- if (error)
- return error;
-
- irq = irq_of_parse_and_map(node, 0);
- if (!irq) {
- dev_err(dev, "Unable to parse or map IRQ\n");
- return -ENXIO;
- }
-
- error = devm_request_threaded_irq(dev, irq, NULL, cap1106_thread_func,
- IRQF_ONESHOT, dev_name(dev), priv);
- if (error)
- return error;
-
- return 0;
-}
-
-static const struct of_device_id cap1106_dt_ids[] = {
- { .compatible = "microchip,cap1106", },
- {}
-};
-MODULE_DEVICE_TABLE(of, cap1106_dt_ids);
-
-static const struct i2c_device_id cap1106_i2c_ids[] = {
- { "cap1106", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, cap1106_i2c_ids);
-
-static struct i2c_driver cap1106_i2c_driver = {
- .driver = {
- .name = "cap1106",
- .owner = THIS_MODULE,
- .of_match_table = cap1106_dt_ids,
- },
- .id_table = cap1106_i2c_ids,
- .probe = cap1106_i2c_probe,
-};
-
-module_i2c_driver(cap1106_i2c_driver);
-
-MODULE_ALIAS("platform:cap1106");
-MODULE_DESCRIPTION("Microchip CAP1106 driver");
-MODULE_AUTHOR("Daniel Mack <linux@zonque.org>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
new file mode 100644
index 000000000000..4f59f0bab28f
--- /dev/null
+++ b/drivers/input/keyboard/cap11xx.c
@@ -0,0 +1,376 @@
+/*
+ * Input driver for Microchip CAP11xx based capacitive touch sensors
+ *
+ * (c) 2014 Daniel Mack <linux@zonque.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/gpio/consumer.h>
+
+#define CAP11XX_REG_MAIN_CONTROL 0x00
+#define CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT (6)
+#define CAP11XX_REG_MAIN_CONTROL_GAIN_MASK (0xc0)
+#define CAP11XX_REG_MAIN_CONTROL_DLSEEP BIT(4)
+#define CAP11XX_REG_GENERAL_STATUS 0x02
+#define CAP11XX_REG_SENSOR_INPUT 0x03
+#define CAP11XX_REG_NOISE_FLAG_STATUS 0x0a
+#define CAP11XX_REG_SENOR_DELTA(X) (0x10 + (X))
+#define CAP11XX_REG_SENSITIVITY_CONTROL 0x1f
+#define CAP11XX_REG_CONFIG 0x20
+#define CAP11XX_REG_SENSOR_ENABLE 0x21
+#define CAP11XX_REG_SENSOR_CONFIG 0x22
+#define CAP11XX_REG_SENSOR_CONFIG2 0x23
+#define CAP11XX_REG_SAMPLING_CONFIG 0x24
+#define CAP11XX_REG_CALIBRATION 0x26
+#define CAP11XX_REG_INT_ENABLE 0x27
+#define CAP11XX_REG_REPEAT_RATE 0x28
+#define CAP11XX_REG_MT_CONFIG 0x2a
+#define CAP11XX_REG_MT_PATTERN_CONFIG 0x2b
+#define CAP11XX_REG_MT_PATTERN 0x2d
+#define CAP11XX_REG_RECALIB_CONFIG 0x2f
+#define CAP11XX_REG_SENSOR_THRESH(X) (0x30 + (X))
+#define CAP11XX_REG_SENSOR_NOISE_THRESH 0x38
+#define CAP11XX_REG_STANDBY_CHANNEL 0x40
+#define CAP11XX_REG_STANDBY_CONFIG 0x41
+#define CAP11XX_REG_STANDBY_SENSITIVITY 0x42
+#define CAP11XX_REG_STANDBY_THRESH 0x43
+#define CAP11XX_REG_CONFIG2 0x44
+#define CAP11XX_REG_CONFIG2_ALT_POL BIT(6)
+#define CAP11XX_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
+#define CAP11XX_REG_SENSOR_CALIB (0xb1 + (X))
+#define CAP11XX_REG_SENSOR_CALIB_LSB1 0xb9
+#define CAP11XX_REG_SENSOR_CALIB_LSB2 0xba
+#define CAP11XX_REG_PRODUCT_ID 0xfd
+#define CAP11XX_REG_MANUFACTURER_ID 0xfe
+#define CAP11XX_REG_REVISION 0xff
+
+#define CAP11XX_MANUFACTURER_ID 0x5d
+
+struct cap11xx_priv {
+ struct regmap *regmap;
+ struct input_dev *idev;
+
+ /* config */
+ u32 keycodes[];
+};
+
+struct cap11xx_hw_model {
+ u8 product_id;
+ unsigned int num_channels;
+};
+
+enum {
+ CAP1106,
+ CAP1126,
+ CAP1188,
+};
+
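+/*
+ * Supported models, indexed by i2c_device_id driver_data: expected
+ * product ID and number of capacitive sense channels.
+ */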
+static const struct cap11xx_hw_model cap11xx_devices[] = {
+ [CAP1106] = { .product_id = 0x55, .num_channels = 6 },
+ [CAP1126] = { .product_id = 0x53, .num_channels = 6 },
+ [CAP1188] = { .product_id = 0x50, .num_channels = 8 },
+};
+
+static const struct reg_default cap11xx_reg_defaults[] = {
+ { CAP11XX_REG_MAIN_CONTROL, 0x00 },
+ { CAP11XX_REG_GENERAL_STATUS, 0x00 },
+ { CAP11XX_REG_SENSOR_INPUT, 0x00 },
+ { CAP11XX_REG_NOISE_FLAG_STATUS, 0x00 },
+ { CAP11XX_REG_SENSITIVITY_CONTROL, 0x2f },
+ { CAP11XX_REG_CONFIG, 0x20 },
+ { CAP11XX_REG_SENSOR_ENABLE, 0x3f },
+ { CAP11XX_REG_SENSOR_CONFIG, 0xa4 },
+ { CAP11XX_REG_SENSOR_CONFIG2, 0x07 },
+ { CAP11XX_REG_SAMPLING_CONFIG, 0x39 },
+ { CAP11XX_REG_CALIBRATION, 0x00 },
+ { CAP11XX_REG_INT_ENABLE, 0x3f },
+ { CAP11XX_REG_REPEAT_RATE, 0x3f },
+ { CAP11XX_REG_MT_CONFIG, 0x80 },
+ { CAP11XX_REG_MT_PATTERN_CONFIG, 0x00 },
+ { CAP11XX_REG_MT_PATTERN, 0x3f },
+ { CAP11XX_REG_RECALIB_CONFIG, 0x8a },
+ { CAP11XX_REG_SENSOR_THRESH(0), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(1), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(2), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(3), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(4), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(5), 0x40 },
+ { CAP11XX_REG_SENSOR_NOISE_THRESH, 0x01 },
+ { CAP11XX_REG_STANDBY_CHANNEL, 0x00 },
+ { CAP11XX_REG_STANDBY_CONFIG, 0x39 },
+ { CAP11XX_REG_STANDBY_SENSITIVITY, 0x02 },
+ { CAP11XX_REG_STANDBY_THRESH, 0x40 },
+ { CAP11XX_REG_CONFIG2, 0x40 },
+ { CAP11XX_REG_SENSOR_CALIB_LSB1, 0x00 },
+ { CAP11XX_REG_SENSOR_CALIB_LSB2, 0x00 },
+};
+
+static bool cap11xx_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CAP11XX_REG_MAIN_CONTROL:
+ case CAP11XX_REG_SENSOR_INPUT:
+ case CAP11XX_REG_SENOR_DELTA(0):
+ case CAP11XX_REG_SENOR_DELTA(1):
+ case CAP11XX_REG_SENOR_DELTA(2):
+ case CAP11XX_REG_SENOR_DELTA(3):
+ case CAP11XX_REG_SENOR_DELTA(4):
+ case CAP11XX_REG_SENOR_DELTA(5):
+ case CAP11XX_REG_PRODUCT_ID:
+ case CAP11XX_REG_MANUFACTURER_ID:
+ case CAP11XX_REG_REVISION:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config cap11xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CAP11XX_REG_REVISION,
+ .reg_defaults = cap11xx_reg_defaults,
+
+ .num_reg_defaults = ARRAY_SIZE(cap11xx_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = cap11xx_volatile_reg,
+};
+
+static irqreturn_t cap11xx_thread_func(int irq_num, void *data)
+{
+ struct cap11xx_priv *priv = data;
+ unsigned int status;
+ int ret, i;
+
+ /*
+ * Deassert interrupt. This needs to be done before reading the status
+ * registers, which will not carry valid values otherwise.
+ */
+ ret = regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL, 1, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(priv->regmap, CAP11XX_REG_SENSOR_INPUT, &status);
+ if (ret < 0)
+ goto out;
+
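+ /*
+ * Each bit in the sensor input register maps to one channel; report
+ * the corresponding keycode as pressed or released accordingly.
+ */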
+ for (i = 0; i < priv->idev->keycodemax; i++)
+ input_report_key(priv->idev, priv->keycodes[i],
+ status & (1 << i));
+
+ input_sync(priv->idev);
+
+out:
+ return IRQ_HANDLED;
+}
+
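+/*
+ * CAP11XX_REG_MAIN_CONTROL_DLSEEP puts the chip into its deep sleep
+ * state, stopping sensor scanning; it is set whenever the input device
+ * is not held open (see cap11xx_input_open/close below).
+ */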
+static int cap11xx_set_sleep(struct cap11xx_priv *priv, bool sleep)
+{
+ return regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
+ CAP11XX_REG_MAIN_CONTROL_DLSEEP,
+ sleep ? CAP11XX_REG_MAIN_CONTROL_DLSEEP : 0);
+}
+
+static int cap11xx_input_open(struct input_dev *idev)
+{
+ struct cap11xx_priv *priv = input_get_drvdata(idev);
+
+ return cap11xx_set_sleep(priv, false);
+}
+
+static void cap11xx_input_close(struct input_dev *idev)
+{
+ struct cap11xx_priv *priv = input_get_drvdata(idev);
+
+ cap11xx_set_sleep(priv, true);
+}
+
+static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c_client->dev;
+ struct cap11xx_priv *priv;
+ struct device_node *node;
+ const struct cap11xx_hw_model *cap;
+ int i, error, irq, gain = 0;
+ unsigned int val, rev;
+ u32 gain32;
+
+ if (id->driver_data >= ARRAY_SIZE(cap11xx_devices)) {
+ dev_err(dev, "Invalid device ID %lu\n", id->driver_data);
+ return -EINVAL;
+ }
+
+ cap = &cap11xx_devices[id->driver_data];
+ if (!cap->num_channels) {
+ dev_err(dev, "Invalid device configuration\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev,
+ sizeof(*priv) +
+ cap->num_channels * sizeof(priv->keycodes[0]),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init_i2c(i2c_client, &cap11xx_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_PRODUCT_ID, &val);
+ if (error)
+ return error;
+
+ if (val != cap->product_id) {
+ dev_err(dev, "Product ID: Got 0x%02x, expected 0x%02x\n",
+ val, cap->product_id);
+ return -ENXIO;
+ }
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_MANUFACTURER_ID, &val);
+ if (error)
+ return error;
+
+ if (val != CAP11XX_MANUFACTURER_ID) {
+ dev_err(dev, "Manufacturer ID: Got 0x%02x, expected 0x%02x\n",
+ val, CAP11XX_MANUFACTURER_ID);
+ return -ENXIO;
+ }
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_REVISION, &rev);
+ if (error < 0)
+ return error;
+
+ dev_info(dev, "CAP11XX detected, revision 0x%02x\n", rev);
+ i2c_set_clientdata(i2c_client, priv);
+ node = dev->of_node;
+
+ if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
+ if (is_power_of_2(gain32) && gain32 <= 8)
+ gain = ilog2(gain32);
+ else
+ dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
+ }
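+ /*
+ * Worked example (hypothetical DT value): microchip,sensor-gain = <4>
+ * passes the power-of-two check, so gain = ilog2(4) = 2, which is
+ * shifted into the GAIN field of the main control register below.
+ */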
+
+ if (of_property_read_bool(node, "microchip,irq-active-high")) {
+ error = regmap_update_bits(priv->regmap, CAP11XX_REG_CONFIG2,
+ CAP11XX_REG_CONFIG2_ALT_POL, 0);
+ if (error)
+ return error;
+ }
+
+ /* Provide some useful defaults */
+ for (i = 0; i < cap->num_channels; i++)
+ priv->keycodes[i] = KEY_A + i;
+
+ of_property_read_u32_array(node, "linux,keycodes",
+ priv->keycodes, cap->num_channels);
+
+ error = regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
+ CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
+ gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
+ if (error)
+ return error;
+
+ /* Disable autorepeat. The Linux input system has its own handling. */
+ error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0);
+ if (error)
+ return error;
+
+ priv->idev = devm_input_allocate_device(dev);
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "CAP11XX capacitive touch sensor";
+ priv->idev->id.bustype = BUS_I2C;
+ priv->idev->evbit[0] = BIT_MASK(EV_KEY);
+
+ if (of_property_read_bool(node, "autorepeat"))
+ __set_bit(EV_REP, priv->idev->evbit);
+
+ for (i = 0; i < cap->num_channels; i++)
+ __set_bit(priv->keycodes[i], priv->idev->keybit);
+
+ __clear_bit(KEY_RESERVED, priv->idev->keybit);
+
+ priv->idev->keycode = priv->keycodes;
+ priv->idev->keycodesize = sizeof(priv->keycodes[0]);
+ priv->idev->keycodemax = cap->num_channels;
+
+ priv->idev->id.vendor = CAP11XX_MANUFACTURER_ID;
+ priv->idev->id.product = cap->product_id;
+ priv->idev->id.version = rev;
+
+ priv->idev->open = cap11xx_input_open;
+ priv->idev->close = cap11xx_input_close;
+
+ input_set_drvdata(priv->idev, priv);
+
+ /*
+ * Put the device in deep sleep mode for now.
+ * ->open() will bring it back once it is actually needed.
+ */
+ cap11xx_set_sleep(priv, true);
+
+ error = input_register_device(priv->idev);
+ if (error)
+ return error;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(dev, "Unable to parse or map IRQ\n");
+ return -ENXIO;
+ }
+
+ error = devm_request_threaded_irq(dev, irq, NULL, cap11xx_thread_func,
+ IRQF_ONESHOT, dev_name(dev), priv);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id cap11xx_dt_ids[] = {
+ { .compatible = "microchip,cap1106", },
+ { .compatible = "microchip,cap1126", },
+ { .compatible = "microchip,cap1188", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cap11xx_dt_ids);
+
+static const struct i2c_device_id cap11xx_i2c_ids[] = {
+ { "cap1106", CAP1106 },
+ { "cap1126", CAP1126 },
+ { "cap1188", CAP1188 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cap11xx_i2c_ids);
+
+static struct i2c_driver cap11xx_i2c_driver = {
+ .driver = {
+ .name = "cap11xx",
+ .owner = THIS_MODULE,
+ .of_match_table = cap11xx_dt_ids,
+ },
+ .id_table = cap11xx_i2c_ids,
+ .probe = cap11xx_i2c_probe,
+};
+
+module_i2c_driver(cap11xx_i2c_driver);
+
+MODULE_ALIAS("platform:cap11xx");
+MODULE_DESCRIPTION("Microchip CAP11XX driver");
+MODULE_AUTHOR("Daniel Mack <linux@zonque.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 8f3a24e15402..d4dd78a7d56b 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
struct gpio_button_data {
@@ -617,27 +618,31 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
- int gpio;
+ int gpio = -1;
enum of_gpio_flags flags;
- if (!of_find_property(pp, "gpios", NULL)) {
- pdata->nbuttons--;
- dev_warn(dev, "Found button without gpios\n");
- continue;
- }
+ button = &pdata->buttons[i++];
- gpio = of_get_gpio_flags(pp, 0, &flags);
- if (gpio < 0) {
- error = gpio;
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- return ERR_PTR(error);
+ if (!of_find_property(pp, "gpios", NULL)) {
+ button->irq = irq_of_parse_and_map(pp, 0);
+ if (button->irq == 0) {
+ i--;
+ pdata->nbuttons--;
+ dev_warn(dev, "Found button without gpios or irqs\n");
+ continue;
+ }
+ } else {
+ gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (gpio < 0) {
+ error = gpio;
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get gpio flags, error: %d\n",
+ error);
+ return ERR_PTR(error);
+ }
}
- button = &pdata->buttons[i++];
-
button->gpio = gpio;
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index cb32e2b506b7..21bea52d4365 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -616,6 +616,8 @@ static ssize_t lm8323_set_disable(struct device *dev,
unsigned int i;
ret = kstrtouint(buf, 10, &i);
+ if (ret)
+ return ret;
mutex_lock(&lm->lock);
lm->kp_enabled = !i;
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 8c079371c2e7..265d641c40e2 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -66,7 +66,6 @@
struct lpc32xx_kscan_drv {
struct input_dev *input;
struct clk *clk;
- struct resource *iores;
void __iomem *kscan_base;
unsigned int irq;
@@ -188,32 +187,27 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
return -EINVAL;
}
- kscandat = kzalloc(sizeof(struct lpc32xx_kscan_drv), GFP_KERNEL);
- if (!kscandat) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ kscandat = devm_kzalloc(&pdev->dev, sizeof(*kscandat),
+ GFP_KERNEL);
+ if (!kscandat)
return -ENOMEM;
- }
error = lpc32xx_parse_dt(&pdev->dev, kscandat);
if (error) {
dev_err(&pdev->dev, "failed to parse device tree\n");
- goto err_free_mem;
+ return error;
}
keymap_size = sizeof(kscandat->keymap[0]) *
(kscandat->matrix_sz << kscandat->row_shift);
- kscandat->keymap = kzalloc(keymap_size, GFP_KERNEL);
- if (!kscandat->keymap) {
- dev_err(&pdev->dev, "could not allocate memory for keymap\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ kscandat->keymap = devm_kzalloc(&pdev->dev, keymap_size, GFP_KERNEL);
+ if (!kscandat->keymap)
+ return -ENOMEM;
- kscandat->input = input = input_allocate_device();
+ kscandat->input = input = devm_input_allocate_device(&pdev->dev);
if (!input) {
dev_err(&pdev->dev, "failed to allocate input device\n");
- error = -ENOMEM;
- goto err_free_keymap;
+ return -ENOMEM;
}
/* Setup key input */
@@ -234,39 +228,26 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
kscandat->keymap, kscandat->input);
if (error) {
dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_free_input;
+ return error;
}
input_set_drvdata(kscandat->input, kscandat);
- kscandat->iores = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!kscandat->iores) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto err_free_input;
- }
-
- kscandat->kscan_base = ioremap(kscandat->iores->start,
- resource_size(kscandat->iores));
- if (!kscandat->kscan_base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -EBUSY;
- goto err_release_memregion;
- }
+ kscandat->kscan_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kscandat->kscan_base))
+ return PTR_ERR(kscandat->kscan_base);
/* Get the key scanner clock */
- kscandat->clk = clk_get(&pdev->dev, NULL);
+ kscandat->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(kscandat->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
- error = PTR_ERR(kscandat->clk);
- goto err_unmap;
+ return PTR_ERR(kscandat->clk);
}
/* Configure the key scanner */
error = clk_prepare_enable(kscandat->clk);
if (error)
- goto err_clk_put;
+ return error;
writel(kscandat->deb_clks, LPC32XX_KS_DEB(kscandat->kscan_base));
writel(kscandat->scan_delay, LPC32XX_KS_SCAN_CTL(kscandat->kscan_base));
@@ -277,52 +258,20 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base));
clk_disable_unprepare(kscandat->clk);
- error = request_irq(irq, lpc32xx_kscan_irq, 0, pdev->name, kscandat);
+ error = devm_request_irq(&pdev->dev, irq, lpc32xx_kscan_irq, 0,
+ pdev->name, kscandat);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_clk_put;
+ return error;
}
error = input_register_device(kscandat->input);
if (error) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ return error;
}
platform_set_drvdata(pdev, kscandat);
- return 0;
-
-err_free_irq:
- free_irq(irq, kscandat);
-err_clk_put:
- clk_put(kscandat->clk);
-err_unmap:
- iounmap(kscandat->kscan_base);
-err_release_memregion:
- release_mem_region(kscandat->iores->start,
- resource_size(kscandat->iores));
-err_free_input:
- input_free_device(kscandat->input);
-err_free_keymap:
- kfree(kscandat->keymap);
-err_free_mem:
- kfree(kscandat);
-
- return error;
-}
-
-static int lpc32xx_kscan_remove(struct platform_device *pdev)
-{
- struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);
-
- free_irq(platform_get_irq(pdev, 0), kscandat);
- clk_put(kscandat->clk);
- iounmap(kscandat->kscan_base);
- release_mem_region(kscandat->iores->start,
- resource_size(kscandat->iores));
- input_unregister_device(kscandat->input);
- kfree(kscandat->keymap);
- kfree(kscandat);
return 0;
}
@@ -378,7 +327,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match);
static struct platform_driver lpc32xx_kscan_driver = {
.probe = lpc32xx_kscan_probe,
- .remove = lpc32xx_kscan_remove,
.driver = {
.name = DRV_NAME,
.pm = &lpc32xx_kscan_pm_ops,
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 009c82256e89..3aa2ec45bcab 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -214,13 +214,14 @@ static int mpr_touchkey_probe(struct i2c_client *client,
return -EINVAL;
}
- mpr121 = kzalloc(sizeof(struct mpr121_touchkey), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!mpr121 || !input_dev) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ mpr121 = devm_kzalloc(&client->dev, sizeof(*mpr121),
+ GFP_KERNEL);
+ if (!mpr121)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev)
+ return -ENOMEM;
mpr121->client = client;
mpr121->input_dev = input_dev;
@@ -243,44 +244,26 @@ static int mpr_touchkey_probe(struct i2c_client *client,
error = mpr121_phys_init(pdata, mpr121, client);
if (error) {
dev_err(&client->dev, "Failed to init register\n");
- goto err_free_mem;
+ return error;
}
- error = request_threaded_irq(client->irq, NULL,
+ error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
mpr_touchkey_interrupt,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->dev.driver->name, mpr121);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_mem;
+ return error;
}
error = input_register_device(input_dev);
if (error)
- goto err_free_irq;
+ return error;
i2c_set_clientdata(client, mpr121);
device_init_wakeup(&client->dev, pdata->wakeup);
return 0;
-
-err_free_irq:
- free_irq(client->irq, mpr121);
-err_free_mem:
- input_free_device(input_dev);
- kfree(mpr121);
- return error;
-}
-
-static int mpr_touchkey_remove(struct i2c_client *client)
-{
- struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client);
-
- free_irq(client->irq, mpr121);
- input_unregister_device(mpr121->input_dev);
- kfree(mpr121);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -327,7 +310,6 @@ static struct i2c_driver mpr_touchkey_driver = {
},
.id_table = mpr121_id,
.probe = mpr_touchkey_probe,
- .remove = mpr_touchkey_remove,
};
module_i2c_driver(mpr_touchkey_driver);
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 6ab3e7c96329..a90d6bdc499e 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -741,37 +741,27 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
return -ENXIO;
}
- keypad = kzalloc(sizeof(struct pxa27x_keypad), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!keypad || !input_dev) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- error = -ENOMEM;
- goto failed_free;
- }
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad),
+ GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
keypad->pdata = pdata;
keypad->input_dev = input_dev;
keypad->irq = irq;
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto failed_free;
- }
-
- keypad->mmio_base = ioremap(res->start, resource_size(res));
- if (keypad->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto failed_free_mem;
- }
+ keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(keypad->mmio_base))
+ return PTR_ERR(keypad->mmio_base);
- keypad->clk = clk_get(&pdev->dev, NULL);
+ keypad->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get keypad clock\n");
- error = PTR_ERR(keypad->clk);
- goto failed_free_io;
+ return PTR_ERR(keypad->clk);
}
input_dev->name = pdev->name;
@@ -802,7 +792,7 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
}
if (error) {
dev_err(&pdev->dev, "failed to build keycode\n");
- goto failed_put_clk;
+ return error;
}
keypad->row_shift = get_count_order(pdata->matrix_key_cols);
@@ -812,61 +802,26 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
input_dev->evbit[0] |= BIT_MASK(EV_REL);
}
- error = request_irq(irq, pxa27x_keypad_irq_handler, 0,
- pdev->name, keypad);
+ error = devm_request_irq(&pdev->dev, irq, pxa27x_keypad_irq_handler,
+ 0, pdev->name, keypad);
if (error) {
dev_err(&pdev->dev, "failed to request IRQ\n");
- goto failed_put_clk;
+ return error;
}
/* Register the input device */
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto failed_free_irq;
+ return error;
}
platform_set_drvdata(pdev, keypad);
device_init_wakeup(&pdev->dev, 1);
return 0;
-
-failed_free_irq:
- free_irq(irq, keypad);
-failed_put_clk:
- clk_put(keypad->clk);
-failed_free_io:
- iounmap(keypad->mmio_base);
-failed_free_mem:
- release_mem_region(res->start, resource_size(res));
-failed_free:
- input_free_device(input_dev);
- kfree(keypad);
- return error;
}
-static int pxa27x_keypad_remove(struct platform_device *pdev)
-{
- struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(keypad->irq, keypad);
- clk_put(keypad->clk);
-
- input_unregister_device(keypad->input_dev);
- iounmap(keypad->mmio_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(keypad);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:pxa27x-keypad");
-
#ifdef CONFIG_OF
static const struct of_device_id pxa27x_keypad_dt_match[] = {
{ .compatible = "marvell,pxa27x-keypad" },
@@ -877,7 +832,6 @@ MODULE_DEVICE_TABLE(of, pxa27x_keypad_dt_match);
static struct platform_driver pxa27x_keypad_driver = {
.probe = pxa27x_keypad_probe,
- .remove = pxa27x_keypad_remove,
.driver = {
.name = "pxa27x-keypad",
.of_match_table = of_match_ptr(pxa27x_keypad_dt_match),
@@ -888,3 +842,5 @@ module_platform_driver(pxa27x_keypad_driver);
MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
MODULE_LICENSE("GPL");
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:pxa27x-keypad");
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index cfdca6e99779..cc87443aa2ee 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -112,8 +112,7 @@ static int pm860x_onkey_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pm860x_onkey_suspend(struct device *dev)
+static int __maybe_unused pm860x_onkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -122,7 +121,7 @@ static int pm860x_onkey_suspend(struct device *dev)
chip->wakeup_flag |= 1 << PM8607_IRQ_ONKEY;
return 0;
}
-static int pm860x_onkey_resume(struct device *dev)
+static int __maybe_unused pm860x_onkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -131,7 +130,6 @@ static int pm860x_onkey_resume(struct device *dev)
chip->wakeup_flag &= ~(1 << PM8607_IRQ_ONKEY);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm860x_onkey_pm_ops, pm860x_onkey_suspend, pm860x_onkey_resume);
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e0f522516ef5..189bdc8e91a5 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -13,17 +13,15 @@
#include <linux/pm.h>
#include "ad714x.h"
-#ifdef CONFIG_PM_SLEEP
-static int ad714x_i2c_suspend(struct device *dev)
+static int __maybe_unused ad714x_i2c_suspend(struct device *dev)
{
return ad714x_disable(i2c_get_clientdata(to_i2c_client(dev)));
}
-static int ad714x_i2c_resume(struct device *dev)
+static int __maybe_unused ad714x_i2c_resume(struct device *dev)
{
return ad714x_enable(i2c_get_clientdata(to_i2c_client(dev)));
}
-#endif
static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 3a90b710e309..a79e50b58bf5 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -16,17 +16,15 @@
#define AD714x_SPI_CMD_PREFIX 0xE000 /* bits 15:11 */
#define AD714x_SPI_READ BIT(10)
-#ifdef CONFIG_PM_SLEEP
-static int ad714x_spi_suspend(struct device *dev)
+static int __maybe_unused ad714x_spi_suspend(struct device *dev)
{
return ad714x_disable(spi_get_drvdata(to_spi_device(dev)));
}
-static int ad714x_spi_resume(struct device *dev)
+static int __maybe_unused ad714x_spi_resume(struct device *dev)
{
return ad714x_enable(spi_get_drvdata(to_spi_device(dev)));
}
-#endif
static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index 416f47ddcc90..470bfd6f0830 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -105,8 +105,7 @@ static int adxl34x_i2c_remove(struct i2c_client *client)
return adxl34x_remove(ac);
}
-#ifdef CONFIG_PM_SLEEP
-static int adxl34x_i2c_suspend(struct device *dev)
+static int __maybe_unused adxl34x_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -116,7 +115,7 @@ static int adxl34x_i2c_suspend(struct device *dev)
return 0;
}
-static int adxl34x_i2c_resume(struct device *dev)
+static int __maybe_unused adxl34x_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -125,7 +124,6 @@ static int adxl34x_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(adxl34x_i2c_pm, adxl34x_i2c_suspend,
adxl34x_i2c_resume);
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 76dc0679d3b1..da6e76b58dab 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -94,8 +94,7 @@ static int adxl34x_spi_remove(struct spi_device *spi)
return adxl34x_remove(ac);
}
-#ifdef CONFIG_PM_SLEEP
-static int adxl34x_spi_suspend(struct device *dev)
+static int __maybe_unused adxl34x_spi_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = spi_get_drvdata(spi);
@@ -105,7 +104,7 @@ static int adxl34x_spi_suspend(struct device *dev)
return 0;
}
-static int adxl34x_spi_resume(struct device *dev)
+static int __maybe_unused adxl34x_spi_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = spi_get_drvdata(spi);
@@ -114,7 +113,6 @@ static int adxl34x_spi_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
adxl34x_spi_resume);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index cab87f5ce6d3..a364e109ca7c 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -639,8 +639,7 @@ static int drv260x_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int drv260x_suspend(struct device *dev)
+static int __maybe_unused drv260x_suspend(struct device *dev)
{
struct drv260x_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -672,7 +671,7 @@ out:
return ret;
}
-static int drv260x_resume(struct device *dev)
+static int __maybe_unused drv260x_resume(struct device *dev)
{
struct drv260x_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -702,7 +701,6 @@ out:
mutex_unlock(&haptics->input_dev->mutex);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(drv260x_pm_ops, drv260x_suspend, drv260x_resume);
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index 0f437581cc04..a021744e608c 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -406,8 +406,7 @@ static int drv2667_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int drv2667_suspend(struct device *dev)
+static int __maybe_unused drv2667_suspend(struct device *dev)
{
struct drv2667_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -436,7 +435,7 @@ out:
return ret;
}
-static int drv2667_resume(struct device *dev)
+static int __maybe_unused drv2667_resume(struct device *dev)
{
struct drv2667_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -464,7 +463,6 @@ out:
mutex_unlock(&haptics->input_dev->mutex);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(drv2667_pm_ops, drv2667_suspend, drv2667_resume);
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index de21e317da32..0ac176d66a6f 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -225,8 +225,7 @@ static int gp2a_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gp2a_suspend(struct device *dev)
+static int __maybe_unused gp2a_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gp2a_data *dt = i2c_get_clientdata(client);
@@ -244,7 +243,7 @@ static int gp2a_suspend(struct device *dev)
return retval;
}
-static int gp2a_resume(struct device *dev)
+static int __maybe_unused gp2a_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gp2a_data *dt = i2c_get_clientdata(client);
@@ -261,7 +260,6 @@ static int gp2a_resume(struct device *dev)
return retval;
}
-#endif
static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index afed8e2b2f94..ac1fa5f44580 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
{
- static atomic_t device_no = ATOMIC_INIT(0);
+ static atomic_t device_no = ATOMIC_INIT(-1);
const struct ims_pcu_device_info *info;
int error;
@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
}
/* Device appears to be operable, complete initialization */
- pcu->device_no = atomic_inc_return(&device_no) - 1;
+ pcu->device_no = atomic_inc_return(&device_no);
/*
* PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index d708478bc5b5..6e29349da537 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -615,8 +615,7 @@ static int kxtj9_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int kxtj9_suspend(struct device *dev)
+static int __maybe_unused kxtj9_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -631,7 +630,7 @@ static int kxtj9_suspend(struct device *dev)
return 0;
}
-static int kxtj9_resume(struct device *dev)
+static int __maybe_unused kxtj9_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -646,7 +645,6 @@ static int kxtj9_resume(struct device *dev)
mutex_unlock(&input_dev->mutex);
return retval;
}
-#endif
static SIMPLE_DEV_PM_OPS(kxtj9_pm_ops, kxtj9_suspend, kxtj9_resume);
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 034093ee63bc..39e930c10ebb 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -309,8 +309,7 @@ static int max77693_haptic_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max77693_haptic_suspend(struct device *dev)
+static int __maybe_unused max77693_haptic_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max77693_haptic *haptic = platform_get_drvdata(pdev);
@@ -323,7 +322,7 @@ static int max77693_haptic_suspend(struct device *dev)
return 0;
}
-static int max77693_haptic_resume(struct device *dev)
+static int __maybe_unused max77693_haptic_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max77693_haptic *haptic = platform_get_drvdata(pdev);
@@ -335,7 +334,6 @@ static int max77693_haptic_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
max77693_haptic_suspend, max77693_haptic_resume);
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 297e2a9169d5..7c49b8d23894 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -133,8 +133,7 @@ static int max8925_onkey_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max8925_onkey_suspend(struct device *dev)
+static int __maybe_unused max8925_onkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
@@ -148,7 +147,7 @@ static int max8925_onkey_suspend(struct device *dev)
return 0;
}
-static int max8925_onkey_resume(struct device *dev)
+static int __maybe_unused max8925_onkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
@@ -161,7 +160,6 @@ static int max8925_onkey_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max8925_onkey_pm_ops, max8925_onkey_suspend, max8925_onkey_resume);
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 5b3154edf822..d0f687281339 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -378,8 +378,7 @@ static int max8997_haptic_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max8997_haptic_suspend(struct device *dev)
+static int __maybe_unused max8997_haptic_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8997_haptic *chip = platform_get_drvdata(pdev);
@@ -388,7 +387,6 @@ static int max8997_haptic_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max8997_haptic_pm_ops, max8997_haptic_suspend, NULL);
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index 066c5ab632c8..1f9b5ee92746 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -260,7 +260,6 @@ static int palmas_pwron_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
/**
* palmas_pwron_suspend() - suspend handler
* @dev: power button device
@@ -269,7 +268,7 @@ static int palmas_pwron_remove(struct platform_device *pdev)
*
* Return: 0
*/
-static int palmas_pwron_suspend(struct device *dev)
+static int __maybe_unused palmas_pwron_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct palmas_pwron *pwron = platform_get_drvdata(pdev);
@@ -290,7 +289,7 @@ static int palmas_pwron_suspend(struct device *dev)
*
* Return: 0
*/
-static int palmas_pwron_resume(struct device *dev)
+static int __maybe_unused palmas_pwron_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct palmas_pwron *pwron = platform_get_drvdata(pdev);
@@ -300,7 +299,6 @@ static int palmas_pwron_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(palmas_pwron_pm,
palmas_pwron_suspend, palmas_pwron_resume);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index e9c77a95717e..5113877153d7 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -199,8 +199,7 @@ static int pm8xxx_vib_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pm8xxx_vib_suspend(struct device *dev)
+static int __maybe_unused pm8xxx_vib_suspend(struct device *dev)
{
struct pm8xxx_vib *vib = dev_get_drvdata(dev);
@@ -209,7 +208,6 @@ static int pm8xxx_vib_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index cb799177cbd5..c4ca20e63221 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -53,8 +53,7 @@ static irqreturn_t pwrkey_release_irq(int irq, void *_pwr)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
-static int pmic8xxx_pwrkey_suspend(struct device *dev)
+static int __maybe_unused pmic8xxx_pwrkey_suspend(struct device *dev)
{
struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
@@ -64,7 +63,7 @@ static int pmic8xxx_pwrkey_suspend(struct device *dev)
return 0;
}
-static int pmic8xxx_pwrkey_resume(struct device *dev)
+static int __maybe_unused pmic8xxx_pwrkey_resume(struct device *dev)
{
struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
@@ -73,7 +72,6 @@ static int pmic8xxx_pwrkey_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 294aa48bad50..a28ee70ff158 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -144,8 +144,7 @@ static int pwm_beeper_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pwm_beeper_suspend(struct device *dev)
+static int __maybe_unused pwm_beeper_suspend(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
@@ -155,7 +154,7 @@ static int pwm_beeper_suspend(struct device *dev)
return 0;
}
-static int pwm_beeper_resume(struct device *dev)
+static int __maybe_unused pwm_beeper_resume(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
@@ -170,6 +169,7 @@ static int pwm_beeper_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
pwm_beeper_suspend, pwm_beeper_resume);
+#ifdef CONFIG_PM_SLEEP
#define PWM_BEEPER_PM_OPS (&pwm_beeper_pm_ops)
#else
#define PWM_BEEPER_PM_OPS NULL
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 4faf9f8d1240..9d5b89befe6f 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -179,8 +179,7 @@ static int sirfsoc_pwrc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_pwrc_resume(struct device *dev)
+static int __maybe_unused sirfsoc_pwrc_resume(struct device *dev)
{
struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_get_drvdata(dev);
struct input_dev *input = pwrcdrv->input;
@@ -196,7 +195,6 @@ static int sirfsoc_pwrc_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(sirfsoc_pwrc_pm_ops, NULL, sirfsoc_pwrc_resume);
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index ccd6dd18f8f6..fc17b9592f54 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -157,8 +157,7 @@ static void twl4030_vibra_close(struct input_dev *input)
}
/*** Module ***/
-#ifdef CONFIG_PM_SLEEP
-static int twl4030_vibra_suspend(struct device *dev)
+static int __maybe_unused twl4030_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -169,12 +168,11 @@ static int twl4030_vibra_suspend(struct device *dev)
return 0;
}
-static int twl4030_vibra_resume(struct device *dev)
+static int __maybe_unused twl4030_vibra_resume(struct device *dev)
{
vibra_disable_leds();
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 96e0e0c0ccb1..0e0d094df2e6 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -236,8 +236,7 @@ static void twl6040_vibra_close(struct input_dev *input)
mutex_unlock(&info->mutex);
}
-#ifdef CONFIG_PM_SLEEP
-static int twl6040_vibra_suspend(struct device *dev)
+static int __maybe_unused twl6040_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -251,7 +250,6 @@ static int twl6040_vibra_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 366fc7ad5eb6..d8b46b0f2dbe 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -215,6 +215,36 @@ config MOUSE_CYAPA
To compile this driver as a module, choose M here: the module will be
called cyapa.
+config MOUSE_ELAN_I2C
+ tristate "ELAN I2C Touchpad support"
+ depends on I2C
+ help
+ This driver adds support for Elan I2C/SMBus touchpads.
+
+ Say Y here if you have an Elan I2C/SMBus touchpad.
+
+ To compile this driver as a module, choose M here: the module will be
+ called elan_i2c.
+
+config MOUSE_ELAN_I2C_I2C
+ bool "Enable I2C support"
+ depends on MOUSE_ELAN_I2C
+ default y
+ help
+ Say Y here if the Elan touchpad in your system is connected to
+ a standard I2C controller.
+
+ If unsure, say Y.
+
+config MOUSE_ELAN_I2C_SMBUS
+ bool "Enable SMbus support"
+ depends on MOUSE_ELAN_I2C
+ help
+ Say Y here if the Elan touchpad in your system is connected to
+ an SMBus adapter.
+
+ If unsure, say Y.
+
config MOUSE_INPORT
tristate "InPort/MS/ATIXL busmouse"
depends on ISA
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index dda507f8b3a2..560003dcac37 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
obj-$(CONFIG_MOUSE_CYAPA) += cyapa.o
+obj-$(CONFIG_MOUSE_ELAN_I2C) += elan_i2c.o
obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
@@ -34,3 +35,7 @@ psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o
psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o
psmouse-$(CONFIG_MOUSE_PS2_CYPRESS) += cypress_ps2.o
+
+elan_i2c-objs := elan_i2c_core.o
+elan_i2c-$(CONFIG_MOUSE_ELAN_I2C_I2C) += elan_i2c_i2c.o
+elan_i2c-$(CONFIG_MOUSE_ELAN_I2C_SMBUS) += elan_i2c_smbus.o
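+
+# elan_i2c_core.o is always built into the module; each bus backend is
+# linked in only when the corresponding Kconfig option is enabled.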
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index b409c3d7d4fb..1bece8cad46f 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -6,7 +6,7 @@
* Daniel Kurtz <djkurtz@chromium.org>
* Benson Leung <bleung@chromium.org>
*
- * Copyright (C) 2011-2012 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
* Copyright (C) 2011-2012 Google, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -206,7 +206,6 @@ struct cyapa {
struct i2c_client *client;
struct input_dev *input;
char phys[32]; /* device physical location */
- int irq;
bool irq_wake; /* irq wake is enabled */
bool smbus;
@@ -422,8 +421,8 @@ static ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
*/
static int cyapa_get_state(struct cyapa *cyapa)
{
- int ret;
u8 status[BL_STATUS_SIZE];
+ int error;
cyapa->state = CYAPA_STATE_NO_DEVICE;
@@ -433,18 +432,18 @@ static int cyapa_get_state(struct cyapa *cyapa)
* If the device is in operation mode, this will be the DATA regs.
*
*/
- ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
- status);
+ error = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
+ status);
/*
* On smbus systems in OP mode, the i2c_reg_read will fail with
* -ETIMEDOUT. In this case, try again using the smbus equivalent
* command. This should return a BL_HEAD indicating CYAPA_STATE_OP.
*/
- if (cyapa->smbus && (ret == -ETIMEDOUT || ret == -ENXIO))
- ret = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
+ if (cyapa->smbus && (error == -ETIMEDOUT || error == -ENXIO))
+ error = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
- if (ret != BL_STATUS_SIZE)
+ if (error != BL_STATUS_SIZE)
goto error;
if ((status[REG_OP_STATUS] & OP_STATUS_SRC) == OP_STATUS_SRC) {
@@ -454,7 +453,7 @@ static int cyapa_get_state(struct cyapa *cyapa)
cyapa->state = CYAPA_STATE_OP;
break;
default:
- ret = -EAGAIN;
+ error = -EAGAIN;
goto error;
}
} else {
@@ -468,7 +467,7 @@ static int cyapa_get_state(struct cyapa *cyapa)
return 0;
error:
- return (ret < 0) ? ret : -EAGAIN;
+ return (error < 0) ? error : -EAGAIN;
}
/*
@@ -487,31 +486,31 @@ error:
*/
static int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
{
- int ret;
+ int error;
int tries = timeout / 100;
- ret = cyapa_get_state(cyapa);
- while ((ret || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
+ error = cyapa_get_state(cyapa);
+ while ((error || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
msleep(100);
- ret = cyapa_get_state(cyapa);
+ error = cyapa_get_state(cyapa);
}
- return (ret == -EAGAIN || ret == -ETIMEDOUT) ? -ETIMEDOUT : ret;
+ return (error == -EAGAIN || error == -ETIMEDOUT) ? -ETIMEDOUT : error;
}
static int cyapa_bl_deactivate(struct cyapa *cyapa)
{
- int ret;
+ int error;
- ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
- bl_deactivate);
- if (ret < 0)
- return ret;
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
+ bl_deactivate);
+ if (error)
+ return error;
/* wait for bootloader to switch to idle state; should take < 100ms */
msleep(100);
- ret = cyapa_poll_state(cyapa, 500);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 500);
+ if (error)
+ return error;
if (cyapa->state != CYAPA_STATE_BL_IDLE)
return -EAGAIN;
return 0;
@@ -532,11 +531,11 @@ static int cyapa_bl_deactivate(struct cyapa *cyapa)
*/
static int cyapa_bl_exit(struct cyapa *cyapa)
{
- int ret;
+ int error;
- ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
- if (ret < 0)
- return ret;
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
+ if (error)
+ return error;
/*
* Wait for bootloader to exit, and operation mode to start.
@@ -548,9 +547,9 @@ static int cyapa_bl_exit(struct cyapa *cyapa)
* updated to new firmware, it must first calibrate its sensors, which
* can take up to an additional 2 seconds.
*/
- ret = cyapa_poll_state(cyapa, 2000);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 2000);
+ if (error < 0)
+ return error;
if (cyapa->state != CYAPA_STATE_OP)
return -EAGAIN;
@@ -577,10 +576,13 @@ static int cyapa_set_power_mode(struct cyapa *cyapa, u8 power_mode)
power = ret & ~PWR_MODE_MASK;
power |= power_mode & PWR_MODE_MASK;
ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "failed to set power_mode 0x%02x err = %d\n",
power_mode, ret);
- return ret;
+ return ret;
+ }
+
+ return 0;
}
static int cyapa_get_query_data(struct cyapa *cyapa)
@@ -637,28 +639,28 @@ static int cyapa_check_is_operational(struct cyapa *cyapa)
{
struct device *dev = &cyapa->client->dev;
static const char unique_str[] = "CYTRA";
- int ret;
+ int error;
- ret = cyapa_poll_state(cyapa, 2000);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 2000);
+ if (error)
+ return error;
switch (cyapa->state) {
case CYAPA_STATE_BL_ACTIVE:
- ret = cyapa_bl_deactivate(cyapa);
- if (ret)
- return ret;
+ error = cyapa_bl_deactivate(cyapa);
+ if (error)
+ return error;
/* Fallthrough state */
case CYAPA_STATE_BL_IDLE:
- ret = cyapa_bl_exit(cyapa);
- if (ret)
- return ret;
+ error = cyapa_bl_exit(cyapa);
+ if (error)
+ return error;
/* Fallthrough state */
case CYAPA_STATE_OP:
- ret = cyapa_get_query_data(cyapa);
- if (ret < 0)
- return ret;
+ error = cyapa_get_query_data(cyapa);
+ if (error)
+ return error;
/* only support firmware protocol gen3 */
if (cyapa->gen != CYAPA_GEN3) {
@@ -753,18 +755,42 @@ static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
return ret;
}
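+/*
+ * The input core calls ->open() when the first user opens the device and
+ * ->close() when the last one releases it, so the pad is powered up and
+ * its interrupt enabled only while the device is actually in use.
+ */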
+static int cyapa_open(struct input_dev *input)
+{
+ struct cyapa *cyapa = input_get_drvdata(input);
+ struct i2c_client *client = cyapa->client;
+ int error;
+
+ error = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ if (error) {
+ dev_err(&client->dev, "set active power failed: %d\n", error);
+ return error;
+ }
+
+ enable_irq(client->irq);
+ return 0;
+}
+
+static void cyapa_close(struct input_dev *input)
+{
+ struct cyapa *cyapa = input_get_drvdata(input);
+
+ disable_irq(cyapa->client->irq);
+ cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+}
+
static int cyapa_create_input_dev(struct cyapa *cyapa)
{
struct device *dev = &cyapa->client->dev;
- int ret;
struct input_dev *input;
+ int error;
if (!cyapa->physical_size_x || !cyapa->physical_size_y)
return -EINVAL;
- input = cyapa->input = input_allocate_device();
+ input = devm_input_allocate_device(dev);
if (!input) {
- dev_err(dev, "allocate memory for input device failed\n");
+ dev_err(dev, "failed to allocate memory for input device.\n");
return -ENOMEM;
}
@@ -772,14 +798,17 @@ static int cyapa_create_input_dev(struct cyapa *cyapa)
input->phys = cyapa->phys;
input->id.bustype = BUS_I2C;
input->id.version = 1;
- input->id.product = 0; /* means any product in eventcomm. */
+ input->id.product = 0; /* Means any product in eventcomm. */
input->dev.parent = &cyapa->client->dev;
+ input->open = cyapa_open;
+ input->close = cyapa_close;
+
input_set_drvdata(input, cyapa);
__set_bit(EV_ABS, input->evbit);
- /* finger position */
+ /* Finger position */
input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
@@ -801,35 +830,25 @@ static int cyapa_create_input_dev(struct cyapa *cyapa)
if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- /* handle pointer emulation and unused slots in core */
- ret = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
- INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
- if (ret) {
- dev_err(dev, "allocate memory for MT slots failed, %d\n", ret);
- goto err_free_device;
+ /* Handle pointer emulation and unused slots in core */
+ error = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "failed to initialize MT slots: %d\n", error);
+ return error;
}
- /* Register the device in input subsystem */
- ret = input_register_device(input);
- if (ret) {
- dev_err(dev, "input device register failed, %d\n", ret);
- goto err_free_device;
- }
+ cyapa->input = input;
return 0;
-
-err_free_device:
- input_free_device(input);
- cyapa->input = NULL;
- return ret;
}
static int cyapa_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
- int ret;
- u8 adapter_func;
- struct cyapa *cyapa;
struct device *dev = &client->dev;
+ struct cyapa *cyapa;
+ u8 adapter_func;
+ int error;
adapter_func = cyapa_check_adapter_functionality(client);
if (adapter_func == CYAPA_ADAPTER_FUNC_NONE) {
@@ -837,11 +856,9 @@ static int cyapa_probe(struct i2c_client *client,
return -EIO;
}
- cyapa = kzalloc(sizeof(struct cyapa), GFP_KERNEL);
- if (!cyapa) {
- dev_err(dev, "allocate memory for cyapa failed\n");
+ cyapa = devm_kzalloc(dev, sizeof(struct cyapa), GFP_KERNEL);
+ if (!cyapa)
return -ENOMEM;
- }
cyapa->gen = CYAPA_GEN3;
cyapa->client = client;
@@ -852,67 +869,61 @@ static int cyapa_probe(struct i2c_client *client,
/* i2c isn't supported, use smbus */
if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
cyapa->smbus = true;
+
cyapa->state = CYAPA_STATE_NO_DEVICE;
- ret = cyapa_check_is_operational(cyapa);
- if (ret) {
- dev_err(dev, "device not operational, %d\n", ret);
- goto err_mem_free;
- }
- ret = cyapa_create_input_dev(cyapa);
- if (ret) {
- dev_err(dev, "create input_dev instance failed, %d\n", ret);
- goto err_mem_free;
+ error = cyapa_check_is_operational(cyapa);
+ if (error) {
+ dev_err(dev, "device not operational, %d\n", error);
+ return error;
}
- ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
- if (ret) {
- dev_err(dev, "set active power failed, %d\n", ret);
- goto err_unregister_device;
+ /* Power down the device until we need it */
+ error = cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+ if (error) {
+ dev_err(dev, "failed to quiesce the device: %d\n", error);
+ return error;
}
- cyapa->irq = client->irq;
- ret = request_threaded_irq(cyapa->irq,
- NULL,
- cyapa_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "cyapa",
- cyapa);
- if (ret) {
- dev_err(dev, "IRQ request failed: %d\n, ", ret);
- goto err_unregister_device;
+ error = cyapa_create_input_dev(cyapa);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, cyapa_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cyapa", cyapa);
+ if (error) {
+ dev_err(dev, "failed to request threaded irq: %d\n", error);
+ return error;
}
- return 0;
+ /* Disable IRQ until the device is opened */
+ disable_irq(client->irq);
-err_unregister_device:
- input_unregister_device(cyapa->input);
-err_mem_free:
- kfree(cyapa);
-
- return ret;
-}
-
-static int cyapa_remove(struct i2c_client *client)
-{
- struct cyapa *cyapa = i2c_get_clientdata(client);
-
- free_irq(cyapa->irq, cyapa);
- input_unregister_device(cyapa->input);
- cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
- kfree(cyapa);
+ /* Register the device in input subsystem */
+ error = input_register_device(cyapa->input);
+ if (error) {
+ dev_err(dev, "failed to register input device: %d\n", error);
+ return error;
+ }
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int cyapa_suspend(struct device *dev)
+static int __maybe_unused cyapa_suspend(struct device *dev)
{
- int ret;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+ struct input_dev *input = cyapa->input;
u8 power_mode;
- struct cyapa *cyapa = dev_get_drvdata(dev);
+ int error;
- disable_irq(cyapa->irq);
+ error = mutex_lock_interruptible(&input->mutex);
+ if (error)
+ return error;
+
+ disable_irq(client->irq);
/*
* Set trackpad device to idle mode if wakeup is allowed,
@@ -920,31 +931,44 @@ static int cyapa_suspend(struct device *dev)
*/
power_mode = device_may_wakeup(dev) ? PWR_MODE_IDLE
: PWR_MODE_OFF;
- ret = cyapa_set_power_mode(cyapa, power_mode);
- if (ret < 0)
- dev_err(dev, "set power mode failed, %d\n", ret);
+ error = cyapa_set_power_mode(cyapa, power_mode);
+ if (error)
+ dev_err(dev, "resume: set power mode to %d failed: %d\n",
+ power_mode, error);
if (device_may_wakeup(dev))
- cyapa->irq_wake = (enable_irq_wake(cyapa->irq) == 0);
+ cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
+
+ mutex_unlock(&input->mutex);
+
return 0;
}
-static int cyapa_resume(struct device *dev)
+static int __maybe_unused cyapa_resume(struct device *dev)
{
- int ret;
- struct cyapa *cyapa = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+ struct input_dev *input = cyapa->input;
+ u8 power_mode;
+ int error;
+
+ mutex_lock(&input->mutex);
if (device_may_wakeup(dev) && cyapa->irq_wake)
- disable_irq_wake(cyapa->irq);
+ disable_irq_wake(client->irq);
+
+ power_mode = input->users ? PWR_MODE_FULL_ACTIVE : PWR_MODE_OFF;
+ error = cyapa_set_power_mode(cyapa, power_mode);
+ if (error)
+ dev_warn(dev, "resume: set power mode to %d failed: %d\n",
+ power_mode, error);
+
+ enable_irq(client->irq);
- ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
- if (ret)
- dev_warn(dev, "resume active power failed, %d\n", ret);
+ mutex_unlock(&input->mutex);
- enable_irq(cyapa->irq);
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(cyapa_pm_ops, cyapa_suspend, cyapa_resume);
@@ -962,7 +986,6 @@ static struct i2c_driver cyapa_driver = {
},
.probe = cyapa_probe,
- .remove = cyapa_remove,
.id_table = cyapa_id_table,
};
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
new file mode 100644
index 000000000000..2e838626205f
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c.h
@@ -0,0 +1,86 @@
+/*
+ * Elan I2C/SMBus Touchpad driver
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#ifndef _ELAN_I2C_H
+#define _ELAN_I2C_H
+
+#include <linux/types.h>
+
+#define ETP_ENABLE_ABS 0x0001
+#define ETP_ENABLE_CALIBRATE 0x0002
+#define ETP_DISABLE_CALIBRATE 0x0000
+#define ETP_DISABLE_POWER 0x0001
+
+/* IAP Firmware handling */
+#define ETP_FW_NAME "elan_i2c.bin"
+#define ETP_IAP_START_ADDR 0x0083
+#define ETP_FW_IAP_PAGE_ERR (1 << 5)
+#define ETP_FW_IAP_INTF_ERR (1 << 4)
+#define ETP_FW_PAGE_SIZE 64
+#define ETP_FW_PAGE_COUNT 768
+#define ETP_FW_SIZE (ETP_FW_PAGE_SIZE * ETP_FW_PAGE_COUNT)
+
+struct i2c_client;
+struct completion;
+
+enum tp_mode {
+ IAP_MODE = 1,
+ MAIN_MODE
+};
+
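+/*
+ * Bus abstraction: the core talks to the touchpad only through these
+ * hooks; the I2C and SMBus backends each provide an implementation
+ * (elan_i2c_ops and elan_smbus_ops below).
+ */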
+struct elan_transport_ops {
+ int (*initialize)(struct i2c_client *client);
+ int (*sleep_control)(struct i2c_client *, bool sleep);
+ int (*power_control)(struct i2c_client *, bool enable);
+ int (*set_mode)(struct i2c_client *client, u8 mode);
+
+ int (*calibrate)(struct i2c_client *client);
+ int (*calibrate_result)(struct i2c_client *client, u8 *val);
+
+ int (*get_baseline_data)(struct i2c_client *client,
+ bool max_baseline, u8 *value);
+
+ int (*get_version)(struct i2c_client *client, bool iap, u8 *version);
+ int (*get_sm_version)(struct i2c_client *client, u8 *version);
+ int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
+ int (*get_product_id)(struct i2c_client *client, u8 *id);
+
+ int (*get_max)(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y);
+ int (*get_resolution)(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y);
+ int (*get_num_traces)(struct i2c_client *client,
+ unsigned int *x_tracenum,
+ unsigned int *y_tracenum);
+
+ int (*iap_get_mode)(struct i2c_client *client, enum tp_mode *mode);
+ int (*iap_reset)(struct i2c_client *client);
+
+ int (*prepare_fw_update)(struct i2c_client *client);
+ int (*write_fw_block)(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx);
+ int (*finish_fw_update)(struct i2c_client *client,
+ struct completion *reset_done);
+
+ int (*get_report)(struct i2c_client *client, u8 *report);
+};
+
+extern const struct elan_transport_ops elan_smbus_ops, elan_i2c_ops;
+
+#endif /* _ELAN_I2C_H */
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
new file mode 100644
index 000000000000..0cb2be48d537
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -0,0 +1,1137 @@
+/*
+ * Elan I2C/SMBus Touchpad driver
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/uaccess.h>
+#include <linux/jiffies.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#include "elan_i2c.h"
+
+#define DRIVER_NAME "elan_i2c"
+#define ELAN_DRIVER_VERSION "1.5.5"
+#define ETP_PRESSURE_OFFSET 25
+#define ETP_MAX_PRESSURE 255
+#define ETP_FWIDTH_REDUCE 90
+#define ETP_FINGER_WIDTH 15
+#define ETP_RETRY_COUNT 3
+
+#define ETP_MAX_FINGERS 5
+#define ETP_FINGER_DATA_LEN 5
+#define ETP_REPORT_ID 0x5D
+#define ETP_REPORT_ID_OFFSET 2
+#define ETP_TOUCH_INFO_OFFSET 3
+#define ETP_FINGER_DATA_OFFSET 4
+#define ETP_MAX_REPORT_LEN 34
+
+/* The main device structure */
+struct elan_tp_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct regulator *vcc;
+
+ const struct elan_transport_ops *ops;
+
+ /* for fw update */
+ struct completion fw_completion;
+ bool in_fw_update;
+
+ struct mutex sysfs_mutex;
+
+ unsigned int max_x;
+ unsigned int max_y;
+ unsigned int width_x;
+ unsigned int width_y;
+ unsigned int x_res;
+ unsigned int y_res;
+
+ u8 product_id;
+ u8 fw_version;
+ u8 sm_version;
+ u8 iap_version;
+ u16 fw_checksum;
+
+ u8 mode;
+
+ bool irq_wake;
+
+ u8 min_baseline;
+ u8 max_baseline;
+ bool baseline_ready;
+};
+
+static int elan_enable_power(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(&data->client->dev,
+ "Failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ do {
+ error = data->ops->power_control(data->client, true);
+ if (error >= 0)
+ return 0;
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_disable_power(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->power_control(data->client, false);
+ if (!error) {
+ error = regulator_disable(data->vcc);
+ if (error) {
+ dev_err(&data->client->dev,
+ "Failed to disable regulator: %d\n",
+ error);
+ /* Attempt to power the chip back up */
+ data->ops->power_control(data->client, true);
+ break;
+ }
+
+ return 0;
+ }
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_sleep(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->sleep_control(data->client, true);
+ if (!error)
+ return 0;
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int __elan_initialize(struct elan_tp_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+
+ error = data->ops->initialize(client);
+ if (error) {
+ dev_err(&client->dev, "device initialize failed: %d\n", error);
+ return error;
+ }
+
+ data->mode |= ETP_ENABLE_ABS;
+ error = data->ops->set_mode(client, data->mode);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to switch to absolute mode: %d\n", error);
+ return error;
+ }
+
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_initialize(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = __elan_initialize(data);
+ if (!error)
+ return 0;
+
+		msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_query_device_info(struct elan_tp_data *data)
+{
+ int error;
+
+ error = data->ops->get_product_id(data->client, &data->product_id);
+ if (error)
+ return error;
+
+ error = data->ops->get_version(data->client, false, &data->fw_version);
+ if (error)
+ return error;
+
+ error = data->ops->get_checksum(data->client, false,
+ &data->fw_checksum);
+ if (error)
+ return error;
+
+ error = data->ops->get_sm_version(data->client, &data->sm_version);
+ if (error)
+ return error;
+
+ error = data->ops->get_version(data->client, true, &data->iap_version);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static unsigned int elan_convert_resolution(u8 val)
+{
+ /*
+ * (value from firmware) * 10 + 790 = dpi
+ *
+ * We also have to convert dpi to dots/mm (*10/254 to avoid floating
+ * point).
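+	 *
+	 * For example, a (hypothetical) firmware value of 10 yields
+	 * 10 * 10 + 790 = 890 dpi, and 890 * 10 / 254 = 35 dots/mm.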
+ */
+
+ return ((int)(char)val * 10 + 790) * 10 / 254;
+}
+
+static int elan_query_device_parameters(struct elan_tp_data *data)
+{
+ unsigned int x_traces, y_traces;
+ u8 hw_x_res, hw_y_res;
+ int error;
+
+ error = data->ops->get_max(data->client, &data->max_x, &data->max_y);
+ if (error)
+ return error;
+
+ error = data->ops->get_num_traces(data->client, &x_traces, &y_traces);
+ if (error)
+ return error;
+
+ data->width_x = data->max_x / x_traces;
+ data->width_y = data->max_y / y_traces;
+
+ error = data->ops->get_resolution(data->client, &hw_x_res, &hw_y_res);
+ if (error)
+ return error;
+
+ data->x_res = elan_convert_resolution(hw_x_res);
+ data->y_res = elan_convert_resolution(hw_y_res);
+
+ return 0;
+}
+
+/*
+ **********************************************************
+ * IAP firmware updater related routines
+ **********************************************************
+ */
+static int elan_write_fw_block(struct elan_tp_data *data,
+ const u8 *page, u16 checksum, int idx)
+{
+ int retry = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->write_fw_block(data->client,
+ page, checksum, idx);
+ if (!error)
+ return 0;
+
+ dev_dbg(&data->client->dev,
+ "IAP retrying page %d (error: %d)\n", idx, error);
+ } while (--retry > 0);
+
+ return error;
+}
+
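+/*
+ * Update flow: the controller has been put into IAP mode by
+ * prepare_fw_update(); we then write every firmware page that follows
+ * the bootloader region together with its checksum, wait for the
+ * device to reset itself, and finally compare the checksum reported
+ * by the device with the running sum of the pages we wrote.
+ */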
+static int __elan_update_firmware(struct elan_tp_data *data,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ int i, j;
+ int error;
+ u16 iap_start_addr;
+ u16 boot_page_count;
+ u16 sw_checksum = 0, fw_checksum = 0;
+
+ error = data->ops->prepare_fw_update(client);
+ if (error)
+ return error;
+
+ iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
+
+ boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
+ for (i = boot_page_count; i < ETP_FW_PAGE_COUNT; i++) {
+ u16 checksum = 0;
+ const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
+
+ for (j = 0; j < ETP_FW_PAGE_SIZE; j += 2)
+ checksum += ((page[j + 1] << 8) | page[j]);
+
+ error = elan_write_fw_block(data, page, checksum, i);
+ if (error) {
+ dev_err(dev, "write page %d fail: %d\n", i, error);
+ return error;
+ }
+
+ sw_checksum += checksum;
+ }
+
+	/* Wait for WDT reset and power-on reset */
+ msleep(600);
+
+ error = data->ops->finish_fw_update(client, &data->fw_completion);
+ if (error)
+ return error;
+
+ error = data->ops->get_checksum(client, true, &fw_checksum);
+ if (error)
+ return error;
+
+ if (sw_checksum != fw_checksum) {
+ dev_err(dev, "checksum diff sw=[%04X], fw=[%04X]\n",
+ sw_checksum, fw_checksum);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_update_firmware(struct elan_tp_data *data,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = data->client;
+ int retval;
+
+	dev_dbg(&client->dev, "Starting firmware update...\n");
+
+ disable_irq(client->irq);
+ data->in_fw_update = true;
+
+ retval = __elan_update_firmware(data, fw);
+ if (retval) {
+ dev_err(&client->dev, "firmware update failed: %d\n", retval);
+ data->ops->iap_reset(client);
+ } else {
+ /* Reinitialize TP after fw is updated */
+ elan_initialize(data);
+ elan_query_device_info(data);
+ }
+
+ data->in_fw_update = false;
+ enable_irq(client->irq);
+
+ return retval;
+}
+
+/*
+ *******************************************************************
+ * SYSFS attributes
+ *******************************************************************
+ */
+static ssize_t elan_sysfs_read_fw_checksum(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "0x%04x\n", data->fw_checksum);
+}
+
+static ssize_t elan_sysfs_read_product_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->product_id);
+}
+
+static ssize_t elan_sysfs_read_fw_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->fw_version);
+}
+
+static ssize_t elan_sysfs_read_sm_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->sm_version);
+}
+
+static ssize_t elan_sysfs_read_iap_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->iap_version);
+}
+
+static ssize_t elan_sysfs_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, ETP_FW_NAME, dev);
+ if (error) {
+ dev_err(dev, "cannot load firmware %s: %d\n",
+ ETP_FW_NAME, error);
+ return error;
+ }
+
+ /* Firmware must be exactly PAGE_NUM * PAGE_SIZE bytes */
+ if (fw->size != ETP_FW_SIZE) {
+ dev_err(dev, "invalid firmware size = %zu, expected %d.\n",
+ fw->size, ETP_FW_SIZE);
+ error = -EBADF;
+ goto out_release_fw;
+ }
+
+ error = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (error)
+ goto out_release_fw;
+
+ error = elan_update_firmware(data, fw);
+
+ mutex_unlock(&data->sysfs_mutex);
+
+out_release_fw:
+ release_firmware(fw);
+ return error ?: count;
+}
+
+static ssize_t calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int tries = 20;
+ int retval;
+ int error;
+ u8 val[3];
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ disable_irq(client->irq);
+
+ data->mode |= ETP_ENABLE_CALIBRATE;
+ retval = data->ops->set_mode(client, data->mode);
+ if (retval) {
+ dev_err(dev, "failed to enable calibration mode: %d\n",
+ retval);
+ goto out;
+ }
+
+ retval = data->ops->calibrate(client);
+ if (retval) {
+ dev_err(dev, "failed to start calibration: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ val[0] = 0xff;
+ do {
+ /* Wait 250ms before checking if calibration has completed. */
+ msleep(250);
+
+ retval = data->ops->calibrate_result(client, val);
+ if (retval)
+ dev_err(dev, "failed to check calibration result: %d\n",
+ retval);
+ else if (val[0] == 0)
+ break; /* calibration done */
+
+ } while (--tries);
+
+ if (tries == 0) {
+ dev_err(dev, "failed to calibrate. Timeout.\n");
+ retval = -ETIMEDOUT;
+ }
+
+out_disable_calibrate:
+ data->mode &= ~ETP_ENABLE_CALIBRATE;
+ error = data->ops->set_mode(data->client, data->mode);
+ if (error) {
+ dev_err(dev, "failed to disable calibration mode: %d\n",
+ error);
+ if (!retval)
+ retval = error;
+ }
+out:
+ enable_irq(client->irq);
+ mutex_unlock(&data->sysfs_mutex);
+ return retval ?: count;
+}
+
+static ssize_t elan_sysfs_read_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+ enum tp_mode mode;
+
+ error = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = data->ops->iap_get_mode(data->client, &mode);
+
+ mutex_unlock(&data->sysfs_mutex);
+
+ if (error)
+ return error;
+
+ return sprintf(buf, "%d\n", (int)mode);
+}
+
+static DEVICE_ATTR(product_id, S_IRUGO, elan_sysfs_read_product_id, NULL);
+static DEVICE_ATTR(firmware_version, S_IRUGO, elan_sysfs_read_fw_ver, NULL);
+static DEVICE_ATTR(sample_version, S_IRUGO, elan_sysfs_read_sm_ver, NULL);
+static DEVICE_ATTR(iap_version, S_IRUGO, elan_sysfs_read_iap_ver, NULL);
+static DEVICE_ATTR(fw_checksum, S_IRUGO, elan_sysfs_read_fw_checksum, NULL);
+static DEVICE_ATTR(mode, S_IRUGO, elan_sysfs_read_mode, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, elan_sysfs_update_fw);
+
+static DEVICE_ATTR_WO(calibrate);
+
+static struct attribute *elan_sysfs_entries[] = {
+ &dev_attr_product_id.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_sample_version.attr,
+ &dev_attr_iap_version.attr,
+ &dev_attr_fw_checksum.attr,
+ &dev_attr_calibrate.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_update_fw.attr,
+ NULL,
+};
+
+static const struct attribute_group elan_sysfs_group = {
+ .attrs = elan_sysfs_entries,
+};
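+
+/*
+ * Userspace example (the device path is hypothetical): writing anything
+ * to update_fw makes the driver request the firmware named by
+ * ETP_FW_NAME and flash it:
+ *
+ *   # echo 1 > /sys/bus/i2c/devices/i2c-1/1-0015/update_fw
+ */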
+
+static ssize_t acquire_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ disable_irq(client->irq);
+
+ data->baseline_ready = false;
+
+ data->mode |= ETP_ENABLE_CALIBRATE;
+ retval = data->ops->set_mode(data->client, data->mode);
+ if (retval) {
+ dev_err(dev, "Failed to enable calibration mode to get baseline: %d\n",
+ retval);
+ goto out;
+ }
+
+ msleep(250);
+
+ retval = data->ops->get_baseline_data(data->client, true,
+ &data->max_baseline);
+ if (retval) {
+ dev_err(dev, "Failed to read max baseline form device: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ retval = data->ops->get_baseline_data(data->client, false,
+ &data->min_baseline);
+ if (retval) {
+ dev_err(dev, "Failed to read min baseline form device: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ data->baseline_ready = true;
+
+out_disable_calibrate:
+ data->mode &= ~ETP_ENABLE_CALIBRATE;
+ error = data->ops->set_mode(data->client, data->mode);
+ if (error) {
+ dev_err(dev, "Failed to disable calibration mode after acquiring baseline: %d\n",
+ error);
+ if (!retval)
+ retval = error;
+ }
+out:
+ enable_irq(client->irq);
+ mutex_unlock(&data->sysfs_mutex);
+ return retval ?: count;
+}
+
+static ssize_t min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ if (!data->baseline_ready) {
+ retval = -ENODATA;
+ goto out;
+ }
+
+ retval = snprintf(buf, PAGE_SIZE, "%d", data->min_baseline);
+
+out:
+ mutex_unlock(&data->sysfs_mutex);
+ return retval;
+}
+
+static ssize_t max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ if (!data->baseline_ready) {
+ retval = -ENODATA;
+ goto out;
+ }
+
+ retval = snprintf(buf, PAGE_SIZE, "%d", data->max_baseline);
+
+out:
+ mutex_unlock(&data->sysfs_mutex);
+ return retval;
+}
+
+static DEVICE_ATTR_WO(acquire);
+static DEVICE_ATTR_RO(min);
+static DEVICE_ATTR_RO(max);
+
+static struct attribute *elan_baseline_sysfs_entries[] = {
+ &dev_attr_acquire.attr,
+ &dev_attr_min.attr,
+ &dev_attr_max.attr,
+ NULL,
+};
+
+static const struct attribute_group elan_baseline_sysfs_group = {
+ .name = "baseline",
+ .attrs = elan_baseline_sysfs_entries,
+};
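+
+/*
+ * Userspace example (path hypothetical): trigger a baseline
+ * acquisition, then read the captured extremes:
+ *
+ *   # echo 1 > .../baseline/acquire
+ *   # cat .../baseline/min .../baseline/max
+ */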
+
+static const struct attribute_group *elan_sysfs_groups[] = {
+ &elan_sysfs_group,
+ &elan_baseline_sysfs_group,
+ NULL
+};
+
+/*
+ ******************************************************************
+ * Elan isr functions
+ ******************************************************************
+ */
+static void elan_report_contact(struct elan_tp_data *data,
+ int contact_num, bool contact_valid,
+ u8 *finger_data)
+{
+ struct input_dev *input = data->input;
+ unsigned int pos_x, pos_y;
+ unsigned int pressure, mk_x, mk_y;
+ unsigned int area_x, area_y, major, minor, new_pressure;
+
+ if (contact_valid) {
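+		/*
+		 * One finger record is 5 bytes: byte 0 holds the high
+		 * nibbles of X and Y, bytes 1 and 2 their low bytes,
+		 * byte 3 packs the X/Y touch widths (in traces) and
+		 * byte 4 the pressure.
+		 */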
+ pos_x = ((finger_data[0] & 0xf0) << 4) |
+ finger_data[1];
+ pos_y = ((finger_data[0] & 0x0f) << 8) |
+ finger_data[2];
+ mk_x = (finger_data[3] & 0x0f);
+ mk_y = (finger_data[3] >> 4);
+ pressure = finger_data[4];
+
+ if (pos_x > data->max_x || pos_y > data->max_y) {
+ dev_dbg(input->dev.parent,
+ "[%d] x=%d y=%d over max (%d, %d)",
+ contact_num, pos_x, pos_y,
+ data->max_x, data->max_y);
+ return;
+ }
+
+ /*
+ * To avoid treating large finger as palm, let's reduce the
+ * width x and y per trace.
+ */
+ area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
+ area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
+
+ major = max(area_x, area_y);
+ minor = min(area_x, area_y);
+
+ new_pressure = pressure + ETP_PRESSURE_OFFSET;
+ if (new_pressure > ETP_MAX_PRESSURE)
+ new_pressure = ETP_MAX_PRESSURE;
+
+ input_mt_slot(input, contact_num);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, pos_x);
+ input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
+ input_report_abs(input, ABS_MT_PRESSURE, new_pressure);
+ input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
+ } else {
+ input_mt_slot(input, contact_num);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ }
+}
+
+static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
+{
+ struct input_dev *input = data->input;
+ u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET];
+ int i;
+ u8 tp_info = packet[ETP_TOUCH_INFO_OFFSET];
+ bool contact_valid;
+
+ for (i = 0; i < ETP_MAX_FINGERS; i++) {
+ contact_valid = tp_info & (1U << (3 + i));
+ elan_report_contact(data, i, contact_valid, finger_data);
+
+ if (contact_valid)
+ finger_data += ETP_FINGER_DATA_LEN;
+ }
+
+ input_report_key(input, BTN_LEFT, tp_info & 0x01);
+ input_mt_report_pointer_emulation(input, true);
+ input_sync(input);
+}
+
+static irqreturn_t elan_isr(int irq, void *dev_id)
+{
+ struct elan_tp_data *data = dev_id;
+ struct device *dev = &data->client->dev;
+ int error;
+ u8 report[ETP_MAX_REPORT_LEN];
+
+ /*
+	 * While the device is in IAP mode, it raises an interrupt once
+	 * all page writes have completed; the driver must then read the
+	 * 0000 acknowledgement to confirm that IAP has finished.
+ */
+ if (data->in_fw_update) {
+ complete(&data->fw_completion);
+ goto out;
+ }
+
+ error = data->ops->get_report(data->client, report);
+ if (error)
+ goto out;
+
+ if (report[ETP_REPORT_ID_OFFSET] != ETP_REPORT_ID)
+ dev_err(dev, "invalid report id data (%x)\n",
+ report[ETP_REPORT_ID_OFFSET]);
+ else
+ elan_report_absolute(data, report);
+
+out:
+ return IRQ_HANDLED;
+}
+
+/*
+ ******************************************************************
+ * Elan initialization functions
+ ******************************************************************
+ */
+static int elan_setup_input_device(struct elan_tp_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct input_dev *input;
+ unsigned int max_width = max(data->width_x, data->width_y);
+ unsigned int min_width = min(data->width_x, data->width_y);
+ int error;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Elan Touchpad";
+ input->id.bustype = BUS_I2C;
+ input_set_drvdata(input, data);
+
+ error = input_mt_init_slots(input, ETP_MAX_FINGERS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(BTN_LEFT, input->keybit);
+
+ /* Set up ST parameters */
+ input_set_abs_params(input, ABS_X, 0, data->max_x, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, data->max_y, 0, 0);
+ input_abs_set_res(input, ABS_X, data->x_res);
+ input_abs_set_res(input, ABS_Y, data->y_res);
+ input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+
+ /* And MT parameters */
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, data->max_y, 0, 0);
+ input_abs_set_res(input, ABS_MT_POSITION_X, data->x_res);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, data->y_res);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0,
+ ETP_MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
+ ETP_FINGER_WIDTH * max_width, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
+ ETP_FINGER_WIDTH * min_width, 0, 0);
+
+ data->input = input;
+
+ return 0;
+}
+
+static void elan_disable_regulator(void *_data)
+{
+ struct elan_tp_data *data = _data;
+
+ regulator_disable(data->vcc);
+}
+
+static void elan_remove_sysfs_groups(void *_data)
+{
+ struct elan_tp_data *data = _data;
+
+ sysfs_remove_groups(&data->client->dev.kobj, elan_sysfs_groups);
+}
+
+static int elan_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ const struct elan_transport_ops *transport_ops;
+ struct device *dev = &client->dev;
+ struct elan_tp_data *data;
+ unsigned long irqflags;
+ int error;
+
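+	/*
+	 * Prefer the raw I2C transport when the adapter supports plain
+	 * I2C transfers; otherwise fall back to the SMBus transport,
+	 * which needs byte, block and I2C-block data accesses.
+	 */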
+ if (IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_I2C) &&
+ i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ transport_ops = &elan_i2c_ops;
+ } else if (IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) &&
+ i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ transport_ops = &elan_smbus_ops;
+ } else {
+ dev_err(dev, "not a supported I2C/SMBus adapter\n");
+ return -EIO;
+ }
+
+ data = devm_kzalloc(&client->dev, sizeof(struct elan_tp_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+
+ data->ops = transport_ops;
+ data->client = client;
+ init_completion(&data->fw_completion);
+ mutex_init(&data->sysfs_mutex);
+
+ data->vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ error = PTR_ERR(data->vcc);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get 'vcc' regulator: %d\n",
+ error);
+ return error;
+ }
+
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ elan_disable_regulator, data);
+ if (error) {
+ regulator_disable(data->vcc);
+ dev_err(&client->dev,
+ "Failed to add disable regulator action: %d\n",
+ error);
+ return error;
+ }
+
+ /* Initialize the touchpad. */
+ error = elan_initialize(data);
+ if (error)
+ return error;
+
+ error = elan_query_device_info(data);
+ if (error)
+ return error;
+
+ error = elan_query_device_parameters(data);
+ if (error)
+ return error;
+
+ dev_dbg(&client->dev,
+ "Elan Touchpad Information:\n"
+ " Module product ID: 0x%04x\n"
+ " Firmware Version: 0x%04x\n"
+ " Sample Version: 0x%04x\n"
+ " IAP Version: 0x%04x\n"
+ " Max ABS X,Y: %d,%d\n"
+ " Width X,Y: %d,%d\n"
+ " Resolution X,Y: %d,%d (dots/mm)\n",
+ data->product_id,
+ data->fw_version,
+ data->sm_version,
+ data->iap_version,
+ data->max_x, data->max_y,
+ data->width_x, data->width_y,
+ data->x_res, data->y_res);
+
+ /* Set up input device properties based on queried parameters. */
+ error = elan_setup_input_device(data);
+ if (error)
+ return error;
+
+ /*
+	 * Systems using device tree should set up interrupt via DTS;
+	 * the rest will use the default falling-edge interrupts.
+ */
+ irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, elan_isr,
+ irqflags | IRQF_ONESHOT,
+ client->name, data);
+ if (error) {
+ dev_err(&client->dev, "cannot register irq=%d\n", client->irq);
+ return error;
+ }
+
+ error = sysfs_create_groups(&client->dev.kobj, elan_sysfs_groups);
+ if (error) {
+ dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
+ error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ elan_remove_sysfs_groups, data);
+ if (error) {
+ elan_remove_sysfs_groups(data);
+ dev_err(&client->dev,
+ "Failed to add sysfs cleanup action: %d\n",
+ error);
+ return error;
+ }
+
+ error = input_register_device(data->input);
+ if (error) {
+ dev_err(&client->dev, "failed to register input device: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+	 * Systems using device tree should set up wakeup via DTS;
+	 * the rest will configure the device as a wakeup source by default.
+ */
+ if (!client->dev.of_node)
+ device_init_wakeup(&client->dev, true);
+
+ return 0;
+}
+
+static int __maybe_unused elan_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ /*
+ * We are taking the mutex to make sure sysfs operations are
+ * complete before we attempt to bring the device into low[er]
+ * power mode.
+ */
+ ret = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (ret)
+ return ret;
+
+ disable_irq(client->irq);
+
+ if (device_may_wakeup(dev)) {
+ ret = elan_sleep(data);
+ /* Enable wake from IRQ */
+ data->irq_wake = (enable_irq_wake(client->irq) == 0);
+ } else {
+ ret = elan_disable_power(data);
+ }
+
+ mutex_unlock(&data->sysfs_mutex);
+ return ret;
+}
+
+static int __maybe_unused elan_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+
+ if (device_may_wakeup(dev) && data->irq_wake) {
+ disable_irq_wake(client->irq);
+ data->irq_wake = false;
+ }
+
+ error = elan_enable_power(data);
+ if (error)
+ dev_err(dev, "power up when resuming failed: %d\n", error);
+
+ error = elan_initialize(data);
+ if (error)
+ dev_err(dev, "initialize when resuming failed: %d\n", error);
+
+ enable_irq(data->client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(elan_pm_ops, elan_suspend, elan_resume);
+
+static const struct i2c_device_id elan_id[] = {
+ { DRIVER_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, elan_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id elan_of_match[] = {
+ { .compatible = "elan,ekth3000" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, elan_of_match);
+#endif
+
+static struct i2c_driver elan_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &elan_pm_ops,
+ .acpi_match_table = ACPI_PTR(elan_acpi_id),
+ .of_match_table = of_match_ptr(elan_of_match),
+ },
+ .probe = elan_probe,
+ .id_table = elan_id,
+};
+
+module_i2c_driver(elan_driver);
+
+MODULE_AUTHOR("Duson Lin <dusonlin@emc.com.tw>");
+MODULE_DESCRIPTION("Elan I2C/SMBus Touchpad driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ELAN_DRIVER_VERSION);
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
new file mode 100644
index 000000000000..97d4937fc244
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -0,0 +1,611 @@
+/*
+ * Elan I2C/SMBus Touchpad driver - I2C interface
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/unaligned.h>
+
+#include "elan_i2c.h"
+
+/* Elan i2c commands */
+#define ETP_I2C_RESET 0x0100
+#define ETP_I2C_WAKE_UP 0x0800
+#define ETP_I2C_SLEEP 0x0801
+#define ETP_I2C_DESC_CMD 0x0001
+#define ETP_I2C_REPORT_DESC_CMD 0x0002
+#define ETP_I2C_STAND_CMD 0x0005
+#define ETP_I2C_UNIQUEID_CMD 0x0101
+#define ETP_I2C_FW_VERSION_CMD 0x0102
+#define ETP_I2C_SM_VERSION_CMD 0x0103
+#define ETP_I2C_XY_TRACENUM_CMD 0x0105
+#define ETP_I2C_MAX_X_AXIS_CMD 0x0106
+#define ETP_I2C_MAX_Y_AXIS_CMD 0x0107
+#define ETP_I2C_RESOLUTION_CMD 0x0108
+#define ETP_I2C_IAP_VERSION_CMD 0x0110
+#define ETP_I2C_SET_CMD 0x0300
+#define ETP_I2C_POWER_CMD 0x0307
+#define ETP_I2C_FW_CHECKSUM_CMD 0x030F
+#define ETP_I2C_IAP_CTRL_CMD 0x0310
+#define ETP_I2C_IAP_CMD 0x0311
+#define ETP_I2C_IAP_RESET_CMD 0x0314
+#define ETP_I2C_IAP_CHECKSUM_CMD 0x0315
+#define ETP_I2C_CALIBRATE_CMD 0x0316
+#define ETP_I2C_MAX_BASELINE_CMD 0x0317
+#define ETP_I2C_MIN_BASELINE_CMD 0x0318
+
+#define ETP_I2C_REPORT_LEN 34
+#define ETP_I2C_DESC_LENGTH 30
+#define ETP_I2C_REPORT_DESC_LENGTH 158
+#define ETP_I2C_INF_LENGTH 2
+#define ETP_I2C_IAP_PASSWORD 0x1EA5
+#define ETP_I2C_IAP_RESET 0xF0F0
+#define ETP_I2C_MAIN_MODE_ON (1 << 9)
+#define ETP_I2C_IAP_REG_L 0x01
+#define ETP_I2C_IAP_REG_H 0x06
+
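+/*
+ * Register access framing used by the two helpers below: a command is
+ * a 16-bit little-endian register code, written on its own for a read
+ * (followed by a repeated-start read of the requested length) or
+ * followed by a 16-bit little-endian value for a write.
+ */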
+static int elan_i2c_read_block(struct i2c_client *client,
+ u16 reg, u8 *val, u16 len)
+{
+ __le16 buf[] = {
+ cpu_to_le16(reg),
+ };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = client->flags & I2C_M_TEN,
+ .len = sizeof(buf),
+ .buf = (u8 *)buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = (client->flags & I2C_M_TEN) | I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
+}
+
+static int elan_i2c_read_cmd(struct i2c_client *client, u16 reg, u8 *val)
+{
+ int retval;
+
+ retval = elan_i2c_read_block(client, reg, val, ETP_I2C_INF_LENGTH);
+ if (retval < 0) {
+ dev_err(&client->dev, "reading cmd (0x%04x) fail.\n", reg);
+ return retval;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_write_cmd(struct i2c_client *client, u16 reg, u16 cmd)
+{
+ __le16 buf[] = {
+ cpu_to_le16(reg),
+ cpu_to_le16(cmd),
+ };
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = client->flags & I2C_M_TEN,
+ .len = sizeof(buf),
+ .buf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
+}
+
+static int elan_i2c_initialize(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int error;
+ u8 val[256];
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
+ if (error) {
+ dev_err(dev, "device reset failed: %d\n", error);
+ return error;
+ }
+
+ /* Wait for the device to reset */
+ msleep(100);
+
+ /* get reset acknowledgement 0000 */
+ error = i2c_master_recv(client, val, ETP_I2C_INF_LENGTH);
+ if (error < 0) {
+ dev_err(dev, "failed to read reset response: %d\n", error);
+ return error;
+ }
+
+ error = elan_i2c_read_block(client, ETP_I2C_DESC_CMD,
+ val, ETP_I2C_DESC_LENGTH);
+ if (error) {
+ dev_err(dev, "cannot get device descriptor: %d\n", error);
+ return error;
+ }
+
+ error = elan_i2c_read_block(client, ETP_I2C_REPORT_DESC_CMD,
+ val, ETP_I2C_REPORT_DESC_LENGTH);
+ if (error) {
+ dev_err(dev, "fetching report descriptor failed.: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_sleep_control(struct i2c_client *client, bool sleep)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD,
+ sleep ? ETP_I2C_SLEEP : ETP_I2C_WAKE_UP);
+}
+
+static int elan_i2c_power_control(struct i2c_client *client, bool enable)
+{
+ u8 val[2];
+ u16 reg;
+ int error;
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_POWER_CMD, val);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read current power state: %d\n",
+ error);
+ return error;
+ }
+
+ reg = le16_to_cpup((__le16 *)val);
+ if (enable)
+ reg &= ~ETP_DISABLE_POWER;
+ else
+ reg |= ETP_DISABLE_POWER;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_POWER_CMD, reg);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to write current power state: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_set_mode(struct i2c_client *client, u8 mode)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_SET_CMD, mode);
+}
+
+static int elan_i2c_calibrate(struct i2c_client *client)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_CALIBRATE_CMD, 1);
+}
+
+static int elan_i2c_calibrate_result(struct i2c_client *client, u8 *val)
+{
+ return elan_i2c_read_block(client, ETP_I2C_CALIBRATE_CMD, val, 1);
+}
+
+static int elan_i2c_get_baseline_data(struct i2c_client *client,
+ bool max_baseline, u8 *value)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ max_baseline ? ETP_I2C_MAX_BASELINE_CMD :
+ ETP_I2C_MIN_BASELINE_CMD,
+ val);
+ if (error)
+ return error;
+
+ *value = le16_to_cpup((__le16 *)val);
+
+ return 0;
+}
+
+static int elan_i2c_get_version(struct i2c_client *client,
+ bool iap, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ iap ? ETP_I2C_IAP_VERSION_CMD :
+ ETP_I2C_FW_VERSION_CMD,
+ val);
+ if (error) {
+ dev_err(&client->dev, "failed to get %s version: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *version = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_sm_version(struct i2c_client *client, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_SM_VERSION_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get SM version: %d\n", error);
+ return error;
+ }
+
+ *version = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_UNIQUEID_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get product ID: %d\n", error);
+ return error;
+ }
+
+ *id = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_checksum(struct i2c_client *client,
+ bool iap, u16 *csum)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ iap ? ETP_I2C_IAP_CHECKSUM_CMD :
+ ETP_I2C_FW_CHECKSUM_CMD,
+ val);
+ if (error) {
+ dev_err(&client->dev, "failed to get %s checksum: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *csum = le16_to_cpup((__le16 *)val);
+ return 0;
+}
+
+static int elan_i2c_get_max(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_MAX_X_AXIS_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get X dimension: %d\n", error);
+ return error;
+ }
+
+ *max_x = le16_to_cpup((__le16 *)val) & 0x0fff;
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_MAX_Y_AXIS_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get Y dimension: %d\n", error);
+ return error;
+ }
+
+ *max_y = le16_to_cpup((__le16 *)val) & 0x0fff;
+
+ return 0;
+}
+
+static int elan_i2c_get_resolution(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_RESOLUTION_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get resolution: %d\n", error);
+ return error;
+ }
+
+ *hw_res_x = val[0];
+ *hw_res_y = val[1];
+
+ return 0;
+}
+
+static int elan_i2c_get_num_traces(struct i2c_client *client,
+ unsigned int *x_traces,
+ unsigned int *y_traces)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_XY_TRACENUM_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get trace info: %d\n", error);
+ return error;
+ }
+
+ *x_traces = val[0] - 1;
+ *y_traces = val[1] - 1;
+
+ return 0;
+}
+
+static int elan_i2c_iap_get_mode(struct i2c_client *client, enum tp_mode *mode)
+{
+ int error;
+ u16 constant;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read iap control register: %d\n",
+ error);
+ return error;
+ }
+
+ constant = le16_to_cpup((__le16 *)val);
+ dev_dbg(&client->dev, "iap control reg: 0x%04x.\n", constant);
+
+ *mode = (constant & ETP_I2C_MAIN_MODE_ON) ? MAIN_MODE : IAP_MODE;
+
+ return 0;
+}
+
+static int elan_i2c_iap_reset(struct i2c_client *client)
+{
+ int error;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_IAP_RESET_CMD,
+ ETP_I2C_IAP_RESET);
+ if (error) {
+ dev_err(&client->dev, "cannot reset IC: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_set_flash_key(struct i2c_client *client)
+{
+ int error;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_IAP_CMD,
+ ETP_I2C_IAP_PASSWORD);
+ if (error) {
+ dev_err(&client->dev, "cannot set flash key: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_prepare_fw_update(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int error;
+ enum tp_mode mode;
+ u8 val[3];
+ u16 password;
+
+ /* Get FW in which mode (IAP_MODE/MAIN_MODE) */
+ error = elan_i2c_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == IAP_MODE) {
+ /* Reset IC */
+ error = elan_i2c_iap_reset(client);
+ if (error)
+ return error;
+
+ msleep(30);
+ }
+
+	/* Set flash key */
+ error = elan_i2c_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Wait for F/W IAP initialization */
+ msleep(mode == MAIN_MODE ? 100 : 30);
+
+ /* Check if we are in IAP mode or not */
+ error = elan_i2c_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == MAIN_MODE) {
+ dev_err(dev, "wrong mode: %d\n", mode);
+ return -EIO;
+ }
+
+ /* Set flash key again */
+ error = elan_i2c_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Wait for F/W IAP initialization */
+ msleep(30);
+
+ /* read back to check we actually enabled successfully. */
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CMD, val);
+ if (error) {
+ dev_err(dev, "cannot read iap password: %d\n",
+ error);
+ return error;
+ }
+
+ password = le16_to_cpup((__le16 *)val);
+ if (password != ETP_I2C_IAP_PASSWORD) {
+ dev_err(dev, "wrong iap password: 0x%X\n", password);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_write_fw_block(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx)
+{
+ struct device *dev = &client->dev;
+ u8 page_store[ETP_FW_PAGE_SIZE + 4];
+ u8 val[3];
+ u16 result;
+ int ret, error;
+
+ page_store[0] = ETP_I2C_IAP_REG_L;
+ page_store[1] = ETP_I2C_IAP_REG_H;
+ memcpy(&page_store[2], page, ETP_FW_PAGE_SIZE);
+	/* record the checksum in the last two bytes */
+ put_unaligned_le16(checksum, &page_store[ETP_FW_PAGE_SIZE + 2]);
+
+ ret = i2c_master_send(client, page_store, sizeof(page_store));
+ if (ret != sizeof(page_store)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(dev, "Failed to write page %d: %d\n", idx, error);
+ return error;
+ }
+
+ /* Wait for F/W to update one page ROM data. */
+ msleep(20);
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
+ if (error) {
+ dev_err(dev, "Failed to read IAP write result: %d\n", error);
+ return error;
+ }
+
+ result = le16_to_cpup((__le16 *)val);
+ if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
+ dev_err(dev, "IAP reports failed write: %04hx\n",
+ result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_finish_fw_update(struct i2c_client *client,
+ struct completion *completion)
+{
+ struct device *dev = &client->dev;
+ long ret;
+ int error;
+ int len;
+ u8 buffer[ETP_I2C_INF_LENGTH];
+
+ reinit_completion(completion);
+ enable_irq(client->irq);
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
+ if (!error)
+ ret = wait_for_completion_interruptible_timeout(completion,
+ msecs_to_jiffies(300));
+ disable_irq(client->irq);
+
+ if (error) {
+ dev_err(dev, "device reset failed: %d\n", error);
+ return error;
+ } else if (ret == 0) {
+ dev_err(dev, "timeout waiting for device reset\n");
+ return -ETIMEDOUT;
+ } else if (ret < 0) {
+ error = ret;
+ dev_err(dev, "error waiting for device reset: %d\n", error);
+ return error;
+ }
+
+ len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH);
+ if (len != ETP_I2C_INF_LENGTH) {
+ error = len < 0 ? len : -EIO;
+ dev_err(dev, "failed to read INT signal: %d (%d)\n",
+ error, len);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_get_report(struct i2c_client *client, u8 *report)
+{
+ int len;
+
+ len = i2c_master_recv(client, report, ETP_I2C_REPORT_LEN);
+ if (len < 0) {
+ dev_err(&client->dev, "failed to read report data: %d\n", len);
+ return len;
+ }
+
+ if (len != ETP_I2C_REPORT_LEN) {
+ dev_err(&client->dev,
+ "wrong report length (%d vs %d expected)\n",
+ len, ETP_I2C_REPORT_LEN);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+const struct elan_transport_ops elan_i2c_ops = {
+ .initialize = elan_i2c_initialize,
+ .sleep_control = elan_i2c_sleep_control,
+ .power_control = elan_i2c_power_control,
+ .set_mode = elan_i2c_set_mode,
+
+ .calibrate = elan_i2c_calibrate,
+ .calibrate_result = elan_i2c_calibrate_result,
+
+ .get_baseline_data = elan_i2c_get_baseline_data,
+
+ .get_version = elan_i2c_get_version,
+ .get_sm_version = elan_i2c_get_sm_version,
+ .get_product_id = elan_i2c_get_product_id,
+ .get_checksum = elan_i2c_get_checksum,
+
+ .get_max = elan_i2c_get_max,
+ .get_resolution = elan_i2c_get_resolution,
+ .get_num_traces = elan_i2c_get_num_traces,
+
+ .iap_get_mode = elan_i2c_iap_get_mode,
+ .iap_reset = elan_i2c_iap_reset,
+
+ .prepare_fw_update = elan_i2c_prepare_fw_update,
+ .write_fw_block = elan_i2c_write_fw_block,
+ .finish_fw_update = elan_i2c_finish_fw_update,
+
+ .get_report = elan_i2c_get_report,
+};
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
new file mode 100644
index 000000000000..359bf8583d54
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -0,0 +1,514 @@
+/*
+ * Elan I2C/SMBus Touchpad driver - SMBus interface
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include "elan_i2c.h"
+
+/* Elan SMbus commands */
+#define ETP_SMBUS_IAP_CMD 0x00
+#define ETP_SMBUS_ENABLE_TP 0x20
+#define ETP_SMBUS_SLEEP_CMD 0x21
+#define ETP_SMBUS_IAP_PASSWORD_WRITE 0x29
+#define ETP_SMBUS_IAP_PASSWORD_READ 0x80
+#define ETP_SMBUS_WRITE_FW_BLOCK 0x2A
+#define ETP_SMBUS_IAP_RESET_CMD 0x2B
+#define ETP_SMBUS_RANGE_CMD 0xA0
+#define ETP_SMBUS_FW_VERSION_CMD 0xA1
+#define ETP_SMBUS_XY_TRACENUM_CMD 0xA2
+#define ETP_SMBUS_SM_VERSION_CMD 0xA3
+#define ETP_SMBUS_UNIQUEID_CMD 0xA3
+#define ETP_SMBUS_RESOLUTION_CMD 0xA4
+#define ETP_SMBUS_HELLOPACKET_CMD 0xA7
+#define ETP_SMBUS_PACKET_QUERY 0xA8
+#define ETP_SMBUS_IAP_VERSION_CMD 0xAC
+#define ETP_SMBUS_IAP_CTRL_CMD 0xAD
+#define ETP_SMBUS_IAP_CHECKSUM_CMD 0xAE
+#define ETP_SMBUS_FW_CHECKSUM_CMD 0xAF
+#define ETP_SMBUS_MAX_BASELINE_CMD 0xC3
+#define ETP_SMBUS_MIN_BASELINE_CMD 0xC4
+#define ETP_SMBUS_CALIBRATE_QUERY 0xC5
+
+#define ETP_SMBUS_REPORT_LEN 32
+#define ETP_SMBUS_REPORT_OFFSET 2
+#define ETP_SMBUS_HELLOPACKET_LEN 5
+#define ETP_SMBUS_IAP_PASSWORD 0x1234
+#define ETP_SMBUS_IAP_MODE_ON (1 << 6)
+
+static int elan_smbus_initialize(struct i2c_client *client)
+{
+ u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
+ u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+ int len, error;
+
+ /* Get hello packet */
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_HELLOPACKET_CMD, values);
+ if (len != ETP_SMBUS_HELLOPACKET_LEN) {
+ dev_err(&client->dev, "hello packet length fail: %d\n", len);
+ error = len < 0 ? len : -EIO;
+ return error;
+ }
+
+ /* compare hello packet */
+ if (memcmp(values, check, ETP_SMBUS_HELLOPACKET_LEN)) {
+ dev_err(&client->dev, "hello packet fail [%*px]\n",
+ ETP_SMBUS_HELLOPACKET_LEN, values);
+ return -ENXIO;
+ }
+
+ /* enable tp */
+ error = i2c_smbus_write_byte(client, ETP_SMBUS_ENABLE_TP);
+ if (error) {
+ dev_err(&client->dev, "failed to enable touchpad: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_set_mode(struct i2c_client *client, u8 mode)
+{
+ u8 cmd[4] = { 0x00, 0x07, 0x00, mode };
+
+ return i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+}
+
+static int elan_smbus_sleep_control(struct i2c_client *client, bool sleep)
+{
+ if (sleep)
+ return i2c_smbus_write_byte(client, ETP_SMBUS_SLEEP_CMD);
+ else
+ return 0; /* XXX should we send ETP_SMBUS_ENABLE_TP here? */
+}
+
+static int elan_smbus_power_control(struct i2c_client *client, bool enable)
+{
+ return 0; /* A no-op */
+}
+
+static int elan_smbus_calibrate(struct i2c_client *client)
+{
+ u8 cmd[4] = { 0x00, 0x08, 0x00, 0x01 };
+
+ return i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+}
+
+static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
+{
+ int error;
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_CALIBRATE_QUERY, val);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static int elan_smbus_get_baseline_data(struct i2c_client *client,
+ bool max_baseline, u8 *value)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ max_baseline ?
+ ETP_SMBUS_MAX_BASELINE_CMD :
+ ETP_SMBUS_MIN_BASELINE_CMD,
+ val);
+ if (error < 0)
+ return error;
+
+ *value = be16_to_cpup((__be16 *)val);
+
+ return 0;
+}
+
+static int elan_smbus_get_version(struct i2c_client *client,
+ bool iap, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ iap ? ETP_SMBUS_IAP_VERSION_CMD :
+ ETP_SMBUS_FW_VERSION_CMD,
+ val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get %s version: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *version = val[2];
+ return 0;
+}
+
+static int elan_smbus_get_sm_version(struct i2c_client *client, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_SM_VERSION_CMD, val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get SM version: %d\n", error);
+ return error;
+ }
+
+ *version = val[0]; /* XXX Why 0 and not 2 as in IAP/FW versions? */
+ return 0;
+}
+
+static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_UNIQUEID_CMD, val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get product ID: %d\n", error);
+ return error;
+ }
+
+ *id = val[1];
+ return 0;
+}
+
+static int elan_smbus_get_checksum(struct i2c_client *client,
+ bool iap, u16 *csum)
+{
+ int error;
+ u8 val[3];
+
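+	/*
+	 * XXX: the iap/FW command selection below is inverted relative
+	 * to elan_i2c_get_checksum(); it is unclear whether this is
+	 * intentional.
+	 */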
+ error = i2c_smbus_read_block_data(client,
+ iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
+ ETP_SMBUS_IAP_CHECKSUM_CMD,
+ val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get %s checksum: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *csum = be16_to_cpup((__be16 *)val);
+ return 0;
+}
+
+static int elan_smbus_get_max(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
+	if (error < 0) {
+ dev_err(&client->dev, "failed to get dimensions: %d\n", error);
+ return error;
+ }
+
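+	/*
+	 * val[0] packs the high nibbles of both axes (low nibble for X,
+	 * high nibble for Y); val[1] and val[2] carry the low bytes.
+	 */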
+ *max_x = (0x0f & val[0]) << 8 | val[1];
+ *max_y = (0xf0 & val[0]) << 4 | val[2];
+
+ return 0;
+}
+
+static int elan_smbus_get_resolution(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_RESOLUTION_CMD, val);
+	if (error < 0) {
+ dev_err(&client->dev, "failed to get resolution: %d\n", error);
+ return error;
+ }
+
+ *hw_res_x = val[1] & 0x0F;
+ *hw_res_y = (val[1] & 0xF0) >> 4;
+
+ return 0;
+}
+
+static int elan_smbus_get_num_traces(struct i2c_client *client,
+ unsigned int *x_traces,
+ unsigned int *y_traces)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_XY_TRACENUM_CMD, val);
+	if (error < 0) {
+ dev_err(&client->dev, "failed to get trace info: %d\n", error);
+ return error;
+ }
+
+ *x_traces = val[1] - 1;
+ *y_traces = val[2] - 1;
+
+ return 0;
+}
+
+static int elan_smbus_iap_get_mode(struct i2c_client *client,
+ enum tp_mode *mode)
+{
+ int error;
+ u16 constant;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to read iap ctrol register: %d\n",
+ error);
+ return error;
+ }
+
+ constant = be16_to_cpup((__be16 *)val);
+ dev_dbg(&client->dev, "iap control reg: 0x%04x.\n", constant);
+
+ *mode = (constant & ETP_SMBUS_IAP_MODE_ON) ? IAP_MODE : MAIN_MODE;
+
+ return 0;
+}
+
+static int elan_smbus_iap_reset(struct i2c_client *client)
+{
+ int error;
+
+ error = i2c_smbus_write_byte(client, ETP_SMBUS_IAP_RESET_CMD);
+ if (error) {
+ dev_err(&client->dev, "cannot reset IC: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_set_flash_key(struct i2c_client *client)
+{
+ int error;
+ u8 cmd[4] = { 0x00, 0x0B, 0x00, 0x5A };
+
+ error = i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+ if (error) {
+ dev_err(&client->dev, "cannot set flash key: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_prepare_fw_update(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int len;
+ int error;
+ enum tp_mode mode;
+ u8 val[3];
+ u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
+ u16 password;
+
+ /* Get FW in which mode (IAP_MODE/MAIN_MODE) */
+ error = elan_smbus_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == MAIN_MODE) {
+
+ /* set flash key */
+ error = elan_smbus_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* write iap password */
+ if (i2c_smbus_write_byte(client,
+ ETP_SMBUS_IAP_PASSWORD_WRITE) < 0) {
+ dev_err(dev, "cannot write iap password\n");
+ return -EIO;
+ }
+
+ error = i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+ if (error) {
+ dev_err(dev, "failed to write iap password: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Read back password to make sure we enabled flash
+ * successfully.
+ */
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_IAP_PASSWORD_READ,
+ val);
+		if (len < (int)sizeof(u16)) {
+ error = len < 0 ? len : -EIO;
+ dev_err(dev, "failed to read iap password: %d\n",
+ error);
+ return error;
+ }
+
+ password = be16_to_cpup((__be16 *)val);
+ if (password != ETP_SMBUS_IAP_PASSWORD) {
+ dev_err(dev, "wrong iap password = 0x%X\n", password);
+ return -EIO;
+ }
+
+ /* Wait 30ms for MAIN_MODE change to IAP_MODE */
+ msleep(30);
+ }
+
+ error = elan_smbus_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Reset IC */
+ error = elan_smbus_iap_reset(client);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int elan_smbus_write_fw_block(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx)
+{
+ struct device *dev = &client->dev;
+ int error;
+ u16 result;
+ u8 val[3];
+
+ /*
+	 * The SMBus protocol limits block transfers to 32 bytes at a
+	 * time, so we must split the page into 2 transfers.
+ */
+ error = i2c_smbus_write_block_data(client,
+ ETP_SMBUS_WRITE_FW_BLOCK,
+ ETP_FW_PAGE_SIZE / 2,
+ page);
+ if (error) {
+ dev_err(dev, "Failed to write page %d (part %d): %d\n",
+ idx, 1, error);
+ return error;
+ }
+
+ error = i2c_smbus_write_block_data(client,
+ ETP_SMBUS_WRITE_FW_BLOCK,
+ ETP_FW_PAGE_SIZE / 2,
+ page + ETP_FW_PAGE_SIZE / 2);
+ if (error) {
+ dev_err(dev, "Failed to write page %d (part %d): %d\n",
+ idx, 2, error);
+ return error;
+ }
+
+ /* Wait for F/W to update one page ROM data. */
+ usleep_range(8000, 10000);
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_IAP_CTRL_CMD, val);
+ if (error < 0) {
+ dev_err(dev, "Failed to read IAP write result: %d\n",
+ error);
+ return error;
+ }
+
+ result = be16_to_cpup((__be16 *)val);
+ if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
+ dev_err(dev, "IAP reports failed write: %04hx\n",
+ result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
+{
+ int len;
+
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_PACKET_QUERY,
+ &report[ETP_SMBUS_REPORT_OFFSET]);
+ if (len < 0) {
+ dev_err(&client->dev, "failed to read report data: %d\n", len);
+ return len;
+ }
+
+ if (len != ETP_SMBUS_REPORT_LEN) {
+ dev_err(&client->dev,
+ "wrong report length (%d vs %d expected)\n",
+ len, ETP_SMBUS_REPORT_LEN);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_finish_fw_update(struct i2c_client *client,
+ struct completion *fw_completion)
+{
+	/* No special handling needed, unlike the I2C transport */
+ return 0;
+}
+
+const struct elan_transport_ops elan_smbus_ops = {
+ .initialize = elan_smbus_initialize,
+ .sleep_control = elan_smbus_sleep_control,
+ .power_control = elan_smbus_power_control,
+ .set_mode = elan_smbus_set_mode,
+
+ .calibrate = elan_smbus_calibrate,
+ .calibrate_result = elan_smbus_calibrate_result,
+
+ .get_baseline_data = elan_smbus_get_baseline_data,
+
+ .get_version = elan_smbus_get_version,
+ .get_sm_version = elan_smbus_get_sm_version,
+ .get_product_id = elan_smbus_get_product_id,
+ .get_checksum = elan_smbus_get_checksum,
+
+ .get_max = elan_smbus_get_max,
+ .get_resolution = elan_smbus_get_resolution,
+ .get_num_traces = elan_smbus_get_num_traces,
+
+ .iap_get_mode = elan_smbus_iap_get_mode,
+ .iap_reset = elan_smbus_iap_reset,
+
+ .prepare_fw_update = elan_smbus_prepare_fw_update,
+ .write_fw_block = elan_smbus_write_fw_block,
+ .finish_fw_update = elan_smbus_finish_fw_update,
+
+ .get_report = elan_smbus_get_report,
+};
diff --git a/drivers/input/mouse/lifebook.h b/drivers/input/mouse/lifebook.h
index 4c4326c6f504..0baf02a70a99 100644
--- a/drivers/input/mouse/lifebook.h
+++ b/drivers/input/mouse/lifebook.h
@@ -16,14 +16,14 @@ void lifebook_module_init(void);
int lifebook_detect(struct psmouse *psmouse, bool set_properties);
int lifebook_init(struct psmouse *psmouse);
#else
-inline void lifebook_module_init(void)
+static inline void lifebook_module_init(void)
{
}
-inline int lifebook_detect(struct psmouse *psmouse, bool set_properties)
+static inline int lifebook_detect(struct psmouse *psmouse, bool set_properties)
{
return -ENOSYS;
}
-inline int lifebook_init(struct psmouse *psmouse)
+static inline int lifebook_init(struct psmouse *psmouse)
{
return -ENOSYS;
}
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index 2a0360f5b5f7..d6e8f58a1de3 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -318,8 +318,7 @@ static int navpoint_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int navpoint_suspend(struct device *dev)
+static int __maybe_unused navpoint_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct navpoint *navpoint = platform_get_drvdata(pdev);
@@ -333,7 +332,7 @@ static int navpoint_suspend(struct device *dev)
return 0;
}
-static int navpoint_resume(struct device *dev)
+static int __maybe_unused navpoint_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct navpoint *navpoint = platform_get_drvdata(pdev);
@@ -346,7 +345,6 @@ static int navpoint_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(navpoint_pm_ops, navpoint_suspend, navpoint_resume);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index ad822608f6ee..878f18498f3b 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -614,8 +614,7 @@ static int synaptics_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int synaptics_i2c_suspend(struct device *dev)
+static int __maybe_unused synaptics_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct synaptics_i2c *touch = i2c_get_clientdata(client);
@@ -628,7 +627,7 @@ static int synaptics_i2c_suspend(struct device *dev)
return 0;
}
-static int synaptics_i2c_resume(struct device *dev)
+static int __maybe_unused synaptics_i2c_resume(struct device *dev)
{
int ret;
struct i2c_client *client = to_i2c_client(dev);
@@ -643,7 +642,6 @@ static int synaptics_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(synaptics_i2c_pm, synaptics_i2c_suspend,
synaptics_i2c_resume);
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 8921c96589be..131d7826dc6b 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -24,9 +24,7 @@
struct ps2if {
struct serio *io;
- struct resource *iomem_res;
void __iomem *base;
- unsigned irq;
};
/*
@@ -83,16 +81,34 @@ static void altera_ps2_close(struct serio *io)
static int altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
+ struct resource *res;
struct serio *serio;
int error, irq;
- ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
- serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
- if (!ps2if || !serio) {
- error = -ENOMEM;
- goto err_free_mem;
+ ps2if = devm_kzalloc(&pdev->dev, sizeof(struct ps2if), GFP_KERNEL);
+ if (!ps2if)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ps2if->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ps2if->base))
+ return PTR_ERR(ps2if->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ error = devm_request_irq(&pdev->dev, irq, altera_ps2_rxint, 0,
+ pdev->name, ps2if);
+ if (error) {
+ dev_err(&pdev->dev, "could not request IRQ %d\n", irq);
+ return error;
}
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
serio->id.type = SERIO_8042;
serio->write = altera_ps2_write;
serio->open = altera_ps2_open;
@@ -103,56 +119,12 @@ static int altera_ps2_probe(struct platform_device *pdev)
serio->dev.parent = &pdev->dev;
ps2if->io = serio;
- ps2if->iomem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (ps2if->iomem_res == NULL) {
- error = -ENOENT;
- goto err_free_mem;
- }
-
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- error = -ENXIO;
- goto err_free_mem;
- }
- ps2if->irq = irq;
-
- if (!request_mem_region(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res), pdev->name)) {
- error = -EBUSY;
- goto err_free_mem;
- }
-
- ps2if->base = ioremap(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res));
- if (!ps2if->base) {
- error = -ENOMEM;
- goto err_free_res;
- }
-
- error = request_irq(ps2if->irq, altera_ps2_rxint, 0, pdev->name, ps2if);
- if (error) {
- dev_err(&pdev->dev, "could not allocate IRQ %d: %d\n",
- ps2if->irq, error);
- goto err_unmap;
- }
-
- dev_info(&pdev->dev, "base %p, irq %d\n", ps2if->base, ps2if->irq);
+ dev_info(&pdev->dev, "base %p, irq %d\n", ps2if->base, irq);
serio_register_port(ps2if->io);
platform_set_d