author    Russell King <rmk+kernel@arm.linux.org.uk>  2013-04-25 09:42:42 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2013-04-25 09:42:42 +0100
commit    a126f7c41d80322b42ae0383ed3dcb17ee0296fc (patch)
tree      67f3605e72e01f7ec0b15af22d9d7b6ef8598b55
parent    0098fc39e6d575f940487f09f303787efbc7a373 (diff)
parent    a7eb7c6f9a657a01a8359edae31bbeacd18b072c (diff)
Merge branch 'mcpm' of git://git.linaro.org/people/nico/linux into devel-stable
-rw-r--r--CREDITS6
-rw-r--r--Documentation/SubmittingPatches3
-rw-r--r--Documentation/arm/cluster-pm-race-avoidance.txt498
-rw-r--r--Documentation/arm/vlocks.txt211
-rw-r--r--Documentation/device-mapper/dm-raid.txt44
-rw-r--r--Documentation/devicetree/bindings/mfd/ab8500.txt6
-rw-r--r--Documentation/devicetree/bindings/tty/serial/of-serial.txt3
-rw-r--r--Documentation/hwmon/adm12752
-rw-r--r--Documentation/hwmon/adt741011
-rw-r--r--Documentation/hwmon/jc422
-rw-r--r--Documentation/hwmon/lineage-pem2
-rw-r--r--Documentation/hwmon/lm250662
-rw-r--r--Documentation/hwmon/ltc29786
-rw-r--r--Documentation/hwmon/ltc42612
-rw-r--r--Documentation/hwmon/max160642
-rw-r--r--Documentation/hwmon/max160652
-rw-r--r--Documentation/hwmon/max344402
-rw-r--r--Documentation/hwmon/max86882
-rw-r--r--Documentation/hwmon/pmbus2
-rw-r--r--Documentation/hwmon/smm6652
-rw-r--r--Documentation/hwmon/ucd90002
-rw-r--r--Documentation/hwmon/ucd92002
-rw-r--r--Documentation/hwmon/zl61002
-rw-r--r--Documentation/input/alps.txt67
-rw-r--r--Documentation/networking/tuntap.txt77
-rw-r--r--Documentation/power/opp.txt25
-rw-r--r--Documentation/printk-formats.txt2
-rw-r--r--Documentation/trace/ftrace.txt2
-rw-r--r--MAINTAINERS45
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/boot/head.S1
-rw-r--r--arch/arm/Kconfig21
-rw-r--r--arch/arm/Kconfig.debug2
-rw-r--r--arch/arm/boot/Makefile2
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/dts/armada-370-rd.dts8
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi5
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm2835.dtsi2
-rw-r--r--arch/arm/boot/dts/dbx5x0.dtsi3
-rw-r--r--arch/arm/boot/dts/dove.dtsi5
-rw-r--r--arch/arm/boot/dts/href.dtsi2
-rw-r--r--arch/arm/boot/dts/hrefv60plus.dts2
-rw-r--r--arch/arm/boot/dts/imx53-mba53.dts3
-rw-r--r--arch/arm/boot/dts/kirkwood-dns320.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-dns325.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-dockstar.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-dreamplug.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-goflexnet.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-ib62x0.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-iconnect.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-km_kirkwood.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-lschlv2.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-lsxhl.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-mplcec4.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-ns2-common.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-nsa310.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a6.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-topkick.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi5
-rw-r--r--arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts2
-rw-r--r--arch/arm/boot/dts/snowball.dts2
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi3
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi1
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi1
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/mcpm_entry.c263
-rw-r--r--arch/arm/common/mcpm_head.S219
-rw-r--r--arch/arm/common/mcpm_platsmp.c92
-rw-r--r--arch/arm/common/vlock.S108
-rw-r--r--arch/arm/common/vlock.h29
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/include/asm/cacheflush.h75
-rw-r--r--arch/arm/include/asm/mcpm.h209
-rw-r--r--arch/arm/include/asm/mmu.h8
-rw-r--r--arch/arm/include/asm/mmu_context.h2
-rw-r--r--arch/arm/include/asm/tlbflush.h34
-rw-r--r--arch/arm/include/asm/xen/events.h25
-rw-r--r--arch/arm/include/uapi/asm/unistd.h2
-rw-r--r--arch/arm/kernel/asm-offsets.c6
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/head.S26
-rw-r--r--arch/arm/kernel/hw_breakpoint.c2
-rw-r--r--arch/arm/kernel/perf_event.c4
-rw-r--r--arch/arm/kernel/perf_event_v7.c2
-rw-r--r--arch/arm/kernel/smp.c3
-rw-r--r--arch/arm/kernel/smp_tlb.c12
-rw-r--r--arch/arm/kernel/smp_twd.c4
-rw-r--r--arch/arm/kernel/suspend.c1
-rw-r--r--arch/arm/lib/memset.S100
-rw-r--r--arch/arm/mach-at91/board-foxg20.c1
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c1
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c2
-rw-r--r--arch/arm/mach-imx/headsmp.S18
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c15
-rw-r--r--arch/arm/mach-ixp4xx/vulcan-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c25
-rw-r--r--arch/arm/mach-mxs/icoll.c2
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c10
-rw-r--r--arch/arm/mach-mxs/mm.c1
-rw-r--r--arch/arm/mach-mxs/ocotp.c1
-rw-r--r--arch/arm/mach-netx/generic.c2
-rw-r--r--arch/arm/mach-netx/include/mach/irqs.h64
-rw-r--r--arch/arm/mach-omap1/common.h2
-rw-r--r--arch/arm/mach-omap2/Kconfig6
-rw-r--r--arch/arm/mach-omap2/board-generic.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51.c2
-rw-r--r--arch/arm/mach-omap2/common.h1
-rw-r--r--arch/arm/mach-omap2/gpmc.c6
-rw-r--r--arch/arm/mach-omap2/mux.c9
-rw-r--r--arch/arm/mach-pxa/raumfeld.c1
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c2
-rw-r--r--arch/arm/mm/context.c29
-rw-r--r--arch/arm/mm/dma-mapping.c5
-rw-r--r--arch/arm/mm/idmap.c1
-rw-r--r--arch/arm/mm/proc-v7-3level.S2
-rw-r--r--arch/arm/plat-orion/addr-map.c7
-rw-r--r--arch/arm/plat-spear/Kconfig2
-rw-r--r--arch/avr32/Kconfig2
-rw-r--r--arch/blackfin/Kconfig2
-rw-r--r--arch/cris/Kconfig2
-rw-r--r--arch/frv/Kconfig2
-rw-r--r--arch/h8300/Kconfig2
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/kernel/perfmon.c1
-rw-r--r--arch/m32r/Kconfig2
-rw-r--r--arch/m32r/include/uapi/asm/stat.h4
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/Kconfig.machine1
-rw-r--r--arch/m68k/include/asm/MC68328.h10
-rw-r--r--arch/m68k/kernel/setup_no.c3
-rw-r--r--arch/m68k/mm/init.c2
-rw-r--r--arch/m68k/platform/coldfire/m528x.c2
-rw-r--r--arch/metag/include/asm/elf.h3
-rw-r--r--arch/metag/mm/Kconfig1
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mn10300/Kconfig2
-rw-r--r--arch/openrisc/Kconfig3
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/crypto/sha1-powerpc-asm.S4
-rw-r--r--arch/powerpc/include/asm/bitops.h2
-rw-r--r--arch/powerpc/include/asm/reg.h3
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S5
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S4
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c1
-rw-r--r--arch/powerpc/platforms/pseries/hvcserver.c5
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/hypfs/inode.c1
-rw-r--r--arch/s390/include/asm/cpu_mf.h1
-rw-r--r--arch/score/Kconfig2
-rw-r--r--arch/tile/Kconfig2
-rw-r--r--arch/tile/include/asm/compat.h3
-rw-r--r--arch/tile/kernel/compat.c42
-rw-r--r--arch/um/drivers/chan.h2
-rw-r--r--arch/um/drivers/chan_kern.c4
-rw-r--r--arch/um/drivers/chan_user.c12
-rw-r--r--arch/um/drivers/chan_user.h6
-rw-r--r--arch/um/drivers/line.c42
-rw-r--r--arch/um/drivers/net_kern.c2
-rw-r--r--arch/um/drivers/ssl.c1
-rw-r--r--arch/um/drivers/stdio_console.c1
-rw-r--r--arch/um/os-Linux/signal.c2
-rw-r--r--arch/um/os-Linux/start_up.c2
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/include/asm/bootparam_utils.h20
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c10
-rw-r--r--arch/x86/kernel/setup.c10
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/mm/init.c5
-rw-r--r--arch/x86/mm/pat.c7
-rw-r--r--arch/x86/power/cpu.c2
-rw-r--r--arch/xtensa/Kconfig2
-rw-r--r--drivers/acpi/glue.c55
-rw-r--r--drivers/acpi/processor_core.c3
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/processor_perflib.c4
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/amba/tegra-ahb.c2
-rw-r--r--drivers/ata/libata-acpi.c7
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/base/power/power.h8
-rw-r--r--drivers/base/power/qos.c217
-rw-r--r--drivers/base/power/sysfs.c1
-rw-r--r--drivers/base/regmap/regmap-irq.c1
-rw-r--r--drivers/bcma/driver_pci_host.c2
-rw-r--r--drivers/char/hw_random/core.c19
-rw-r--r--drivers/char/hw_random/virtio-rng.c13
-rw-r--r--drivers/char/random.c12
-rw-r--r--drivers/clk/tegra/clk-tegra20.c1
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1
-rw-r--r--drivers/connector/cn_proc.c8
-rw-r--r--drivers/cpufreq/cpufreq_governor.h2
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c8
-rw-r--r--drivers/cpufreq/intel_pstate.c42
-rw-r--r--drivers/firmware/dmi_scan.c5
-rw-r--r--drivers/firmware/efivars.c131
-rw-r--r--drivers/gpio/gpio-ich.c4
-rw-r--r--drivers/gpio/gpio-mvebu.c7
-rw-r--r--drivers/gpio/gpiolib.c143
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c25
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c26
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c37
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c174
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c6
-rw-r--r--drivers/gpu/drm/radeon/r600.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c12
-rw-r--r--drivers/gpu/drm/radeon/si.c6
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/hid/hid-logitech-dj.c22
-rw-r--r--drivers/hwmon/lineage-pem.c2
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c40
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c12
-rw-r--r--drivers/hwmon/sht15.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c9
-rw-r--r--drivers/iio/dac/ad5064.c64
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c1
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c1
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c8
-rw-r--r--drivers/input/mouse/alps.c85
-rw-r--r--drivers/input/mouse/alps.h1
-rw-r--r--drivers/input/mouse/cypress_ps2.c19
-rw-r--r--drivers/input/tablet/wacom_wac.c4
-rw-r--r--drivers/input/touchscreen/ads7846.c7
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c68
-rw-r--r--drivers/input/touchscreen/mms114.c34
-rw-r--r--drivers/iommu/dmar.c1
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/isdn/hisax/st5481_usb.c12
-rw-r--r--drivers/isdn/i4l/isdn_tty.c4
-rw-r--r--drivers/mailbox/pl320-ipc.c3
-rw-r--r--drivers/md/Kconfig11
-rw-r--r--drivers/md/dm-raid.c123
-rw-r--r--drivers/md/md.c19
-rw-r--r--drivers/md/raid0.c13
-rw-r--r--drivers/md/raid1.c8
-rw-r--r--drivers/md/raid10.c97
-rw-r--r--drivers/md/raid10.h5
-rw-r--r--drivers/md/raid5.c38
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/ab8500-gpadc.c17
-rw-r--r--drivers/mfd/omap-usb-host.c6
-rw-r--r--drivers/mfd/palmas.c36
-rw-r--r--drivers/mfd/tps65912-core.c1
-rw-r--r--drivers/mfd/twl4030-audio.c2
-rw-r--r--drivers/mfd/twl4030-madc.c2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c1
-rw-r--r--drivers/mtd/mtdchar.c1
-rw-r--r--drivers/net/bonding/bond_main.c7
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c81
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h7
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c36
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c10
-rw-r--r--drivers/net/ethernet/freescale/fec.c85
-rw-r--r--drivers/net/ethernet/freescale/fec.h18
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c71
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c82
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c11
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c14
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c76
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c27
-rw-r--r--drivers/net/ethernet/sfc/efx.c16
-rw-r--r--drivers/net/ethernet/sfc/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h4
-rw-r--r--drivers/net/ethernet/sfc/rx.c25
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/hippi/rrunner.c3
-rw-r--r--drivers/net/macvlan.c1
-rw-r--r--drivers/net/phy/micrel.c3
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/Kconfig18
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix_devices.c31
-rw-r--r--drivers/net/usb/ax88179_178a.c1448
-rw-r--r--drivers/net/usb/cdc_ncm.c8
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c6
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vxlan.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c104
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h18
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c133
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c19
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c18
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c37
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h35
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c14
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c299
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c6
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/oprofile/oprofilefs.c1
-rw-r--r--drivers/pci/pci-acpi.c8
-rw-r--r--drivers/platform/x86/chromeos_laptop.c41
-rw-r--r--drivers/pnp/pnpacpi/core.c8
-rw-r--r--drivers/regulator/core.c12
-rw-r--r--drivers/regulator/db8500-prcmu.c4
-rw-r--r--drivers/regulator/palmas-regulator.c3
-rw-r--r--drivers/regulator/twl-regulator.c9
-rw-r--r--drivers/rtc/rtc-mv.c28
-rw-r--r--drivers/scsi/scsi_lib.c7
-rw-r--r--drivers/staging/ccg/f_fs.c1
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c16
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c31
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c30
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c27
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c23
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c70
-rw-r--r--drivers/staging/vt6656/card.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c4
-rw-r--r--drivers/staging/zcache/ramster/tcp.c25
-rw-r--r--drivers/tty/hvc/hvcs.c9
-rw-r--r--drivers/tty/serial/8250/8250.c52
-rw-r--r--drivers/tty/serial/8250/8250_pci.c21
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c12
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c8
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c2
-rw-r--r--drivers/tty/serial/of_serial.c6
-rw-r--r--drivers/tty/serial/vt8500_serial.c9
-rw-r--r--drivers/tty/tty_buffer.c2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c4
-rw-r--r--drivers/usb/chipidea/udc.c6
-rw-r--r--drivers/usb/class/cdc-wdm.c23
-rw-r--r--drivers/usb/core/usb-acpi.c9
-rw-r--r--drivers/usb/dwc3/core.c1
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c2
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c8
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/ep0.c7
-rw-r--r--drivers/usb/dwc3/gadget.c3
-rw-r--r--drivers/usb/gadget/Makefile12
-rw-r--r--drivers/usb/gadget/composite.c5
-rw-r--r--drivers/usb/gadget/f_fs.c1
-rw-r--r--drivers/usb/gadget/f_uac1.c1
-rw-r--r--drivers/usb/gadget/imx_udc.c20
-rw-r--r--drivers/usb/gadget/inode.c1
-rw-r--r--drivers/usb/gadget/omap_udc.c3
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c24
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c18
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c28
-rw-r--r--drivers/usb/gadget/u_uac1.c3
-rw-r--r--drivers/usb/host/ehci-hcd.c6
-rw-r--r--drivers/usb/host/ehci-q.c36
-rw-r--r--drivers/usb/musb/Kconfig5
-rw-r--r--drivers/usb/musb/musb_core.c6
-rw-r--r--drivers/usb/musb/omap2430.c12
-rw-r--r--drivers/usb/otg/otg.c10
-rw-r--r--drivers/usb/phy/omap-control-usb.c24
-rw-r--r--drivers/usb/phy/omap-usb3.c8
-rw-r--r--drivers/usb/phy/samsung-usbphy.c8
-rw-r--r--drivers/usb/serial/cp210x.c20
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/qcaux.c1
-rw-r--r--drivers/usb/serial/qcserial.c7
-rw-r--r--drivers/usb/serial/quatech2.c7
-rw-r--r--drivers/usb/storage/initializers.c76
-rw-r--r--drivers/usb/storage/initializers.h4
-rw-r--r--drivers/usb/storage/unusual_devs.h337
-rw-r--r--drivers/video/omap/lcd_ams_delta.c1
-rw-r--r--drivers/video/omap/lcd_osk.c3
-rw-r--r--drivers/w1/masters/w1-gpio.c6
-rw-r--r--drivers/w1/w1.c3
-rw-r--r--drivers/xen/xen-acpi-processor.c8
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c3
-rw-r--r--drivers/xen/xen-stub.c1
-rw-r--r--drivers/xen/xenfs/super.c1
-rw-r--r--fs/9p/vfs_super.c1
-rw-r--r--fs/adfs/super.c1
-rw-r--r--fs/affs/super.c1
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/autofs4/init.c1
-rw-r--r--fs/befs/linuxvfs.c1
-rw-r--r--fs/bfs/inode.c1
-rw-r--r--fs/binfmt_misc.c1
-rw-r--r--fs/btrfs/delayed-inode.c151
-rw-r--r--fs/btrfs/delayed-inode.h2
-rw-r--r--fs/btrfs/disk-io.c16
-rw-r--r--fs/btrfs/extent-tree.c5
-rw-r--r--fs/btrfs/file.c1
-rw-r--r--fs/btrfs/inode.c9
-rw-r--r--fs/btrfs/ioctl.c18
-rw-r--r--fs/btrfs/locking.h1
-rw-r--r--fs/btrfs/qgroup.c10
-rw-r--r--fs/btrfs/relocation.c74
-rw-r--r--fs/btrfs/super.c1
-rw-r--r--fs/btrfs/transaction.c76
-rw-r--r--fs/btrfs/tree-log.c5
-rw-r--r--fs/btrfs/volumes.c20
-rw-r--r--fs/ceph/super.c1
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/cifssmb.c2
-rw-r--r--fs/cifs/connect.c16
-rw-r--r--fs/cifs/inode.c11
-rw-r--r--fs/cifs/smb2ops.c1
-rw-r--r--fs/coda/inode.c1
-rw-r--r--fs/compat.c15
-rw-r--r--fs/configfs/mount.c1
-rw-r--r--fs/cramfs/inode.c1
-rw-r--r--fs/debugfs/inode.c1
-rw-r--r--fs/ecryptfs/Kconfig8
-rw-r--r--fs/ecryptfs/Makefile7
-rw-r--r--fs/ecryptfs/crypto.c9
-rw-r--r--fs/ecryptfs/dentry.c2
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h40
-rw-r--r--fs/ecryptfs/file.c2
-rw-r--r--fs/ecryptfs/inode.c8
-rw-r--r--fs/ecryptfs/keystore.c9
-rw-r--r--fs/ecryptfs/main.c1
-rw-r--r--fs/ecryptfs/messaging.c5
-rw-r--r--fs/efs/super.c1
-rw-r--r--fs/exofs/super.c1
-rw-r--r--fs/ext2/ialloc.c1
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext3/super.c5
-rw-r--r--fs/ext4/super.c7
-rw-r--r--fs/f2fs/super.c1
-rw-r--r--fs/fat/namei_msdos.c1
-rw-r--r--fs/fat/namei_vfat.c1
-rw-r--r--fs/filesystems.c2
-rw-r--r--fs/freevxfs/vxfs_super.c3
-rw-r--r--fs/fuse/control.c1
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/ops_fstype.c4
-rw-r--r--fs/hfs/super.c1
-rw-r--r--fs/hfsplus/super.c1
-rw-r--r--fs/hostfs/hostfs_kern.c10
-rw-r--r--fs/hpfs/super.c1
-rw-r--r--fs/hppfs/hppfs.c1
-rw-r--r--fs/hugetlbfs/inode.c1
-rw-r--r--fs/isofs/inode.c4
-rw-r--r--fs/jffs2/super.c1
-rw-r--r--fs/jfs/super.c1
-rw-r--r--fs/logfs/super.c1
-rw-r--r--fs/minix/inode.c1
-rw-r--r--fs/namei.c2
-rw-r--r--fs/ncpfs/inode.c1
-rw-r--r--fs/nfs/super.c4
-rw-r--r--fs/nfsd/nfs4state.c36
-rw-r--r--fs/nfsd/nfsctl.c1
-rw-r--r--fs/nilfs2/super.c1
-rw-r--r--fs/ntfs/super.c1
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c1
-rw-r--r--fs/ocfs2/super.c1
-rw-r--r--fs/omfs/inode.c1
-rw-r--r--fs/openpromfs/inode.c1
-rw-r--r--fs/pipe.c3
-rw-r--r--fs/proc/namespaces.c12
-rw-r--r--fs/qnx4/inode.c1
-rw-r--r--fs/qnx6/inode.c1
-rw-r--r--fs/quota/dquot.c5
-rw-r--r--fs/reiserfs/super.c5
-rw-r--r--fs/romfs/super.c1
-rw-r--r--fs/squashfs/super.c1
-rw-r--r--fs/sysv/super.c4
-rw-r--r--fs/ubifs/super.c1
-rw-r--r--fs/udf/super.c1
-rw-r--r--fs/ufs/super.c1
-rw-r--r--fs/xfs/xfs_super.c1
-rw-r--r--include/acpi/acpi_bus.h6
-rw-r--r--include/acpi/processor.h3
-rw-r--r--include/asm-generic/atomic.h6
-rw-r--r--include/asm-generic/cmpxchg.h10
-rw-r--r--include/drm/drm_crtc.h6
-rw-r--r--include/linux/ecryptfs.h12
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/hardirq.h2
-rw-r--r--include/linux/i2c/atmel_mxt_ts.h5
-rw-r--r--include/linux/idr.h68
-rw-r--r--include/linux/iio/common/st_sensors.h9
-rw-r--r--include/linux/list.h4
-rw-r--r--include/linux/mfd/palmas.h1
-rw-r--r--include/linux/mfd/tps65912.h1
-rw-r--r--include/linux/mfd/wm831x/auxadc.h2
-rw-r--r--include/linux/mfd/wm831x/core.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set_ahash.h4
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/res_counter.h1
-rw-r--r--include/linux/smpboot.h4
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/uapi/linux/acct.h6
-rw-r--r--include/uapi/linux/aio_abi.h4
-rw-r--r--include/uapi/linux/raid/md_p.h6
-rw-r--r--include/uapi/linux/serial_core.h5
-rw-r--r--init/Kconfig4
-rw-r--r--ipc/msg.c6
-rw-r--r--ipc/msgutil.c3
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/futex.c46
-rw-r--r--kernel/signal.c5
-rw-r--r--kernel/smpboot.c4
-rw-r--r--kernel/softirq.c21
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--kernel/trace/Kconfig24
-rw-r--r--kernel/trace/trace.c27
-rw-r--r--kernel/user_namespace.c4
-rw-r--r--kernel/workqueue.c7
-rw-r--r--lib/idr.c96
-rw-r--r--lib/xz/Kconfig2
-rw-r--r--mm/Kconfig8
-rw-r--r--mm/fremap.c5
-rw-r--r--mm/ksm.c2
-rw-r--r--mm/memcontrol.c8
-rw-r--r--mm/memory_hotplug.c2
-rw-r--r--mm/mempolicy.c4
-rw-r--r--mm/process_vm_access.c8
-rw-r--r--net/9p/trans_virtio.c2
-rw-r--r--net/bridge/br_device.c2
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_mdb.c4
-rw-r--r--net/bridge/br_multicast.c3
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/caif/caif_dev.c2
-rw-r--r--net/caif/caif_usb.c4
-rw-r--r--net/ceph/osdmap.c42
-rw-r--r--net/core/dev.c5
-rw-r--r--net/core/rtnetlink.c1
-rw-r--r--net/dcb/dcbnl.c8
-rw-r--r--net/ieee802154/6lowpan.h2
-rw-r--r--net/ipv4/inet_connection_sock.c1
-rw-r--r--net/ipv4/ip_input.c6
-rw-r--r--net/ipv4/ip_options.c2
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv6/ip6_input.c9
-rw-r--r--net/ipv6/route.c3
-rw-r--r--net/irda/ircomm/ircomm_tty.c29
-rw-r--r--net/irda/iriap.c7
-rw-r--r--net/key/af_key.c8
-rw-r--r--net/l2tp/l2tp_ppp.c1
-rw-r--r--net/mac80211/cfg.c11
-rw-r--r--net/mac80211/iface.c8
-rw-r--r--net/mac80211/mlme.c28
-rw-r--r--net/mac80211/tx.c80
-rw-r--r--net/netfilter/ipset/ip_set_core.c3
-rw-r--r--net/netfilter/nf_conntrack_helper.c11
-rw-r--r--net/netfilter/nfnetlink.c7
-rw-r--r--net/netfilter/xt_AUDIT.c3
-rw-r--r--net/netlabel/netlabel_unlabeled.c27
-rw-r--r--net/rds/message.c8
-rw-r--r--net/rds/stats.c1
-rw-r--r--net/sched/sch_qfq.c66
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/socket.c6
-rw-r--r--net/sctp/ssnmap.c8
-rw-r--r--net/sctp/tsnmap.c13
-rw-r--r--net/sctp/ulpqueue.c87
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c12
-rw-r--r--net/sunrpc/rpc_pipe.c5
-rw-r--r--net/sunrpc/xprtsock.c15
-rw-r--r--net/wireless/core.c3
-rw-r--r--net/wireless/nl80211.c110
-rw-r--r--scripts/Makefile.headersinst11
-rw-r--r--security/keys/compat.c4
-rw-r--r--security/keys/process_keys.c4
-rw-r--r--sound/core/seq/oss/seq_oss_event.c14
-rw-r--r--sound/core/seq/seq_timer.c8
-rw-r--r--sound/core/vmaster.c5
-rw-r--r--sound/oss/sequencer.c6
-rw-r--r--sound/pci/asihpi/asihpi.c3
-rw-r--r--sound/pci/hda/hda_codec.c35
-rw-r--r--sound/pci/hda/patch_ca0132.c36
-rw-r--r--sound/pci/hda/patch_cirrus.c4
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/pci/hda/patch_sigmatel.c29
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/soc/codecs/wm5102.c15
-rw-r--r--sound/soc/codecs/wm5110.c16
-rw-r--r--sound/soc/codecs/wm8350.c4
-rw-r--r--sound/soc/codecs/wm8960.c8
-rw-r--r--sound/soc/tegra/tegra20_i2s.h2
-rw-r--r--sound/soc/tegra/tegra30_i2s.h2
-rw-r--r--sound/usb/card.c15
-rw-r--r--tools/testing/selftests/efivarfs/efivarfs.sh59
-rw-r--r--tools/usb/ffs-test.c2
646 files changed, 8313 insertions, 2969 deletions
diff --git a/CREDITS b/CREDITS
index 948e0fb9a70..78163cb3eb6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -953,11 +953,11 @@ S: Blacksburg, Virginia 24061
S: USA
N: Randy Dunlap
-E: rdunlap@xenotime.net
-W: http://www.xenotime.net/linux/linux.html
-W: http://www.linux-usb.org
+E: rdunlap@infradead.org
+W: http://www.infradead.org/~rdunlap/
D: Linux-USB subsystem, USB core/UHCI/printer/storage drivers
D: x86 SMP, ACPI, bootflag hacking
+D: documentation, builds
S: (ask for current address)
S: USA
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index c379a2a6949..aa0c1e63f05 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -60,8 +60,7 @@ own source tree. For example:
"dontdiff" is a list of files which are generated by the kernel during
the build process, and should be ignored in any diff(1)-generated
patch. The "dontdiff" file is included in the kernel tree in
-2.6.12 and later. For earlier kernel versions, you can get it
-from <http://www.xenotime.net/linux/doc/dontdiff>.
+2.6.12 and later.
Make sure your patch does not include any extra files which do not
belong in a patch submission. Make sure to review your patch -after-
diff --git a/Documentation/arm/cluster-pm-race-avoidance.txt b/Documentation/arm/cluster-pm-race-avoidance.txt
new file mode 100644
index 00000000000..750b6fc24af
--- /dev/null
+++ b/Documentation/arm/cluster-pm-race-avoidance.txt
@@ -0,0 +1,498 @@
+Cluster-wide Power-up/power-down race avoidance algorithm
+=========================================================
+
+This file documents the algorithm which is used to coordinate CPU and
+cluster setup and teardown operations and to manage hardware coherency
+controls safely.
+
+The section "Rationale" explains what the algorithm is for and why it is
+needed. "Basic model" explains general concepts using a simplified view
+of the system. The other sections explain the actual details of the
+algorithm in use.
+
+
+Rationale
+---------
+
+In a system containing multiple CPUs, it is desirable to have the
+ability to turn off individual CPUs when the system is idle, reducing
+power consumption and thermal dissipation.
+
+In a system containing multiple clusters of CPUs, it is also desirable
+to have the ability to turn off entire clusters.
+
+Turning entire clusters off and on is a risky business, because it
+involves performing potentially destructive operations affecting a group
+of independently running CPUs, while the OS continues to run. This
+means that we need some coordination in order to ensure that critical
+cluster-level operations are only performed when it is truly safe to do
+so.
+
+Simple locking may not be sufficient to solve this problem, because
+mechanisms like Linux spinlocks may rely on coherency mechanisms which
+are not immediately enabled when a cluster powers up. Since enabling or
+disabling those mechanisms may itself be a non-atomic operation (such as
+writing some hardware registers and invalidating large caches), other
+methods of coordination are required in order to guarantee safe
+power-down and power-up at the cluster level.
+
+The mechanism presented in this document describes a coherent memory
+based protocol for performing the needed coordination. It aims to be as
+lightweight as possible, while providing the required safety properties.
+
+
+Basic model
+-----------
+
+Each cluster and CPU is assigned a state, as follows:
+
+ DOWN
+ COMING_UP
+ UP
+ GOING_DOWN
+
+ +---------> UP ----------+
+ | v
+
+ COMING_UP GOING_DOWN
+
+ ^ |
+ +--------- DOWN <--------+
+
+
+DOWN: The CPU or cluster is not coherent, and is either powered off or
+ suspended, or is ready to be powered off or suspended.
+
+COMING_UP: The CPU or cluster has committed to moving to the UP state.
+ It may be part way through the process of initialisation and
+ enabling coherency.
+
+UP: The CPU or cluster is active and coherent at the hardware
+ level. A CPU in this state is not necessarily being used
+ actively by the kernel.
+
+GOING_DOWN: The CPU or cluster has committed to moving to the DOWN
+ state. It may be part way through the process of teardown and
+ coherency exit.
+
+
+Each CPU has one of these states assigned to it at any point in time.
+The CPU states are described in the "CPU state" section, below.
+
+Each cluster is also assigned a state, but it is necessary to split the
+state value into two parts (the "cluster" state and "inbound" state) and
+to introduce additional states in order to avoid races between different
+CPUs in the cluster simultaneously modifying the state. The cluster-
+level states are described in the "Cluster state" section.
+
+To help distinguish the CPU states from cluster states in this
+discussion, the state names are given a CPU_ prefix for the CPU states,
+and a CLUSTER_ or INBOUND_ prefix for the cluster states.
+
+
+CPU state
+---------
+
+In this algorithm, each individual core in a multi-core processor is
+referred to as a "CPU". CPUs are assumed to be single-threaded:
+therefore, a CPU can only be doing one thing at a single point in time.
+
+This means that CPUs fit the basic model closely.
+
+The algorithm defines the following states for each CPU in the system:
+
+ CPU_DOWN
+ CPU_COMING_UP
+ CPU_UP
+ CPU_GOING_DOWN
+
+ cluster setup and
+ CPU setup complete policy decision
+ +-----------> CPU_UP ------------+
+ | v
+
+ CPU_COMING_UP CPU_GOING_DOWN
+
+ ^ |
+ +----------- CPU_DOWN <----------+
+ policy decision CPU teardown complete
+ or hardware event
+
+
+The definitions of the four states correspond closely to the states of
+the basic model.
+
+Transitions between states occur as follows.
+
+A trigger event (spontaneous) means that the CPU can transition to the
+next state as a result of making local progress only, with no
+requirement for any external event to happen.
+
+
+CPU_DOWN:
+
+ A CPU reaches the CPU_DOWN state when it is ready for
+ power-down. On reaching this state, the CPU will typically
+ power itself down or suspend itself, via a WFI instruction or a
+ firmware call.
+
+ Next state: CPU_COMING_UP
+ Conditions: none
+
+ Trigger events:
+
+ a) an explicit hardware power-up operation, resulting
+ from a policy decision on another CPU;
+
+ b) a hardware event, such as an interrupt.
+
+
+CPU_COMING_UP:
+
+ A CPU cannot start participating in hardware coherency until the
+ cluster is set up and coherent. If the cluster is not ready,
+ then the CPU will wait in the CPU_COMING_UP state until the
+ cluster has been set up.
+
+ Next state: CPU_UP
+ Conditions: The CPU's parent cluster must be in CLUSTER_UP.
+ Trigger events: Transition of the parent cluster to CLUSTER_UP.
+
+ Refer to the "Cluster state" section for a description of the
+ CLUSTER_UP state.
+
+
+CPU_UP:
+ When a CPU reaches the CPU_UP state, it is safe for the CPU to
+ start participating in local coherency.
+
+ This is done by jumping to the kernel's CPU resume code.
+
+ Note that the definition of this state is slightly different
+ from the basic model definition: CPU_UP does not mean that the
+ CPU is coherent yet, but it does mean that it is safe to resume
+ the kernel. The kernel handles the rest of the resume
+ procedure, so the remaining steps are not visible as part of the
+ race avoidance algorithm.
+
+ The CPU remains in this state until an explicit policy decision
+ is made to shut down or suspend the CPU.
+
+ Next state: CPU_GOING_DOWN
+ Conditions: none
+ Trigger events: explicit policy decision
+
+
+CPU_GOING_DOWN:
+
+ While in this state, the CPU exits coherency, including any
+ operations required to achieve this (such as cleaning data
+ caches).
+
+ Next state: CPU_DOWN
+ Conditions: local CPU teardown complete
+ Trigger events: (spontaneous)
+
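+Putting the four states together, the life cycle of a single CPU can be
+sketched in C-like pseudo-code as follows.  The sketch only illustrates
+the state ordering described above: every helper name is hypothetical,
+and in the real implementation the work is split between C code and the
+low-level assembly entry point.
+
+	/* outbound path: a policy decision takes this CPU down */
+	void cpu_power_down(unsigned int cpu)
+	{
+		set_cpu_state(cpu, CPU_GOING_DOWN);
+		exit_coherency_and_clean_caches();	/* CPU-level teardown  */
+		set_cpu_state(cpu, CPU_DOWN);
+		wfi_or_firmware_power_off();		/* await wake-up event */
+	}
+
+	/* inbound path: entered from the low-level power-up code */
+	void cpu_power_up_entry(unsigned int cpu)
+	{
+		set_cpu_state(cpu, CPU_COMING_UP);
+		wait_for_cluster_state(CLUSTER_UP);	/* see "Cluster state" */
+		set_cpu_state(cpu, CPU_UP);
+		jump_to_kernel_cpu_resume();
+	}
+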
+
+Cluster state
+-------------
+
+A cluster is a group of connected CPUs with some common resources.
+Because a cluster contains multiple CPUs, it can be doing multiple
+things at the same time. This has some implications. In particular, a
+CPU can start up while another CPU is tearing the cluster down.
+
+In this discussion, the "outbound side" is the view of the cluster state
+as seen by a CPU tearing the cluster down. The "inbound side" is the
+view of the cluster state as seen by a CPU setting the cluster up.
+
+In order to enable safe coordination in such situations, it is important
+that a CPU which is setting up the cluster can advertise its state
+independently of the CPU which is tearing down the cluster. For this
+reason, the cluster state is split into two parts:
+
+ "cluster" state: The global state of the cluster; or the state
+ on the outbound side:
+
+ CLUSTER_DOWN
+ CLUSTER_UP
+ CLUSTER_GOING_DOWN
+
+ "inbound" state: The state of the cluster on the inbound side.
+
+ INBOUND_NOT_COMING_UP
+ INBOUND_COMING_UP
+
+
+ The different pairings of these states result in six possible
+ states for the cluster as a whole:
+
+ CLUSTER_UP
+ +==========> INBOUND_NOT_COMING_UP -------------+
+ # |
+ |
+ CLUSTER_UP <----+ |
+ INBOUND_COMING_UP | v
+
+ ^ CLUSTER_GOING_DOWN CLUSTER_GOING_DOWN
+ # INBOUND_COMING_UP <=== INBOUND_NOT_COMING_UP
+
+ CLUSTER_DOWN | |
+ INBOUND_COMING_UP <----+ |
+ |
+ ^ |
+ +=========== CLUSTER_DOWN <------------+
+ INBOUND_NOT_COMING_UP
+
+ Transitions -----> can only be made by the outbound CPU, and
+ only involve changes to the "cluster" state.
+
+ Transitions ===##> can only be made by the inbound CPU, and only
+ involve changes to the "inbound" state, except where there is no
+ further transition possible on the outbound side (i.e., the
+ outbound CPU has put the cluster into the CLUSTER_DOWN state).
+
+ The race avoidance algorithm does not provide a way to determine
+ which exact CPUs within the cluster play these roles. This must
+ be decided in advance by some other means. Refer to the section
+ "Last man and first man selection" for more explanation.
+
+
+ CLUSTER_DOWN/INBOUND_NOT_COMING_UP is the only state where the
+ cluster can actually be powered down.
+
+ The parallelism of the inbound and outbound CPUs is observed by
+ the existence of two different paths from CLUSTER_GOING_DOWN/
+ INBOUND_NOT_COMING_UP (corresponding to GOING_DOWN in the basic
+ model) to CLUSTER_DOWN/INBOUND_COMING_UP (corresponding to
+ COMING_UP in the basic model). The second path avoids cluster
+ teardown completely.
+
+ CLUSTER_UP/INBOUND_COMING_UP is equivalent to UP in the basic
+ model. The final transition to CLUSTER_UP/INBOUND_NOT_COMING_UP
+ is trivial and merely resets the state machine ready for the
+ next cycle.
+
+ Details of the allowable transitions follow.
+
+ The next state in each case is notated
+
+ <cluster state>/<inbound state> (<transitioner>)
+
+ where the <transitioner> is the side on which the transition
+ can occur; either the inbound or the outbound side.
+
+
+CLUSTER_DOWN/INBOUND_NOT_COMING_UP:
+
+ Next state: CLUSTER_DOWN/INBOUND_COMING_UP (inbound)
+ Conditions: none
+ Trigger events:
+
+ a) an explicit hardware power-up operation, resulting
+ from a policy decision on another CPU;
+
+ b) a hardware event, such as an interrupt.
+
+
+CLUSTER_DOWN/INBOUND_COMING_UP:
+
+ In this state, an inbound CPU sets up the cluster, including
+ enabling of hardware coherency at the cluster level and any
+ other operations (such as cache invalidation) which are required
+ in order to achieve this.
+
+ The purpose of this state is to do sufficient cluster-level
+ setup to enable other CPUs in the cluster to enter coherency
+ safely.
+
+ Next state: CLUSTER_UP/INBOUND_COMING_UP (inbound)
+ Conditions: cluster-level setup and hardware coherency complete
+ Trigger events: (spontaneous)
+
+
+CLUSTER_UP/INBOUND_COMING_UP:
+
+ Cluster-level setup is complete and hardware coherency is
+ enabled for the cluster. Other CPUs in the cluster can safely
+ enter coherency.
+
+ This is a transient state, leading immediately to
+ CLUSTER_UP/INBOUND_NOT_COMING_UP. All other CPUs on the cluster
+ should treat these two states as equivalent.
+
+ Next state: CLUSTER_UP/INBOUND_NOT_COMING_UP (inbound)
+ Conditions: none
+ Trigger events: (spontaneous)
+
+
+CLUSTER_UP/INBOUND_NOT_COMING_UP:
+
+ Cluster-level setup is complete and hardware coherency is
+ enabled for the cluster. Other CPUs in the cluster can safely
+ enter coherency.
+
+ The cluster will remain in this state until a policy decision is
+ made to power the cluster down.
+
+ Next state: CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP (outbound)
+ Conditions: none
+ Trigger events: policy decision to power down the cluster
+
+
+CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP:
+
+ An outbound CPU is tearing the cluster down. The selected CPU
+ must wait in this state until all CPUs in the cluster are in the
+ CPU_DOWN state.
+
+ When all CPUs are in the CPU_DOWN state, the cluster can be torn
+ down, for example by cleaning data caches and exiting
+ cluster-level coherency.
+
+ To avoid wasteful, unnecessary teardown operations, the outbound CPU
+ should check the inbound cluster state for asynchronous
+ transitions to INBOUND_COMING_UP. Alternatively, individual
+ CPUs can be checked for entry into CPU_COMING_UP or CPU_UP.
+
+
+ Next states:
+
+ CLUSTER_DOWN/INBOUND_NOT_COMING_UP (outbound)
+ Conditions: cluster torn down and ready to power off
+ Trigger events: (spontaneous)
+
+ CLUSTER_GOING_DOWN/INBOUND_COMING_UP (inbound)
+ Conditions: none
+ Trigger events:
+
+ a) an explicit hardware power-up operation,
+ resulting from a policy decision on another
+ CPU;
+
+ b) a hardware event, such as an interrupt.
+
+
+CLUSTER_GOING_DOWN/INBOUND_COMING_UP:
+
+ The cluster is (or was) being torn down, but another CPU has
+ come online in the meantime and is trying to set up the cluster
+ again.
+
+ If the outbound CPU observes this state, it has two choices:
+
+ a) back out of teardown, restoring the cluster to the
+ CLUSTER_UP state;
+
+ b) finish tearing the cluster down and put the cluster
+ in the CLUSTER_DOWN state; the inbound CPU will
+ set up the cluster again from there.
+
+ Choice (a) permits the removal of some latency by avoiding
+ unnecessary teardown and setup operations in situations where
+ the cluster is not really going to be powered down.
+
+
+ Next states:
+
+ CLUSTER_UP/INBOUND_COMING_UP (outbound)
+ Conditions: cluster-level setup and hardware
+ coherency complete
+ Trigger events: (spontaneous)
+
+ CLUSTER_DOWN/INBOUND_COMING_UP (outbound)
+ Conditions: cluster torn down and ready to power off
+ Trigger events: (spontaneous)
+
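+As a rough illustration of the outbound behaviour described above, the
+teardown decision can be sketched in C-like pseudo-code as follows.  The
+helpers are hypothetical and the sketch is simplified: a real
+implementation also watches for inbound activity while waiting for the
+other CPUs to reach CPU_DOWN, rather than checking only once at the end.
+
+	/* outbound (teardown) path, starting from CLUSTER_UP */
+	set_cluster_state(CLUSTER_GOING_DOWN);
+	wait_for_all_cpus_in_state(CPU_DOWN);
+
+	if (get_inbound_state() == INBOUND_COMING_UP) {
+		/* choice (a): someone is coming up, back out of teardown */
+		set_cluster_state(CLUSTER_UP);
+	} else {
+		/* choice (b): finish tearing the cluster down */
+		clean_caches_and_exit_cluster_coherency();
+		set_cluster_state(CLUSTER_DOWN);	/* may now power off */
+	}
+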
+
+Last man and First man selection
+--------------------------------
+
+The CPU which performs cluster tear-down operations on the outbound side
+is commonly referred to as the "last man".
+
+The CPU which performs cluster setup on the inbound side is commonly
+referred to as the "first man".
+
+The race avoidance algorithm documented above does not provide a
+mechanism to choose which CPUs should play these roles.
+
+
+Last man:
+
+When shutting down the cluster, all the CPUs involved are initially
+executing Linux and hence coherent. Therefore, ordinary spinlocks can
+be used to select a last man safely, before the CPUs become
+non-coherent.
+
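+A minimal sketch of such a selection, assuming a hypothetical per-cluster
+spinlock and an online-CPU count that the platform code keeps up to date:
+
+	static DEFINE_SPINLOCK(cluster_lock);
+	static int cluster_online_cpus;		/* updated on CPU up/down */
+
+	bool i_am_last_man(void)
+	{
+		bool last;
+
+		spin_lock(&cluster_lock);
+		last = (--cluster_online_cpus == 0);
+		spin_unlock(&cluster_lock);
+
+		return last;
+	}
+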
+
+First man:
+
+Because CPUs may power up asynchronously in response to external wake-up
+events, a dynamic mechanism is needed to make sure that only one CPU
+attempts to play the first man role and do the cluster-level
+initialisation: any other CPUs must wait for this to complete before
+proceeding.
+
+Cluster-level initialisation may involve actions such as configuring
+coherency controls in the bus fabric.
+
+The current implementation in mcpm_head.S uses a separate mutual exclusion
+mechanism to do this arbitration. This mechanism is documented in
+detail in vlocks.txt.
+
+
+Features and Limitations
+------------------------
+
+Implementation:
+
+ The current ARM-based implementation is split between
+ arch/arm/common/mcpm_head.S (low-level inbound CPU operations) and
+ arch/arm/common/mcpm_entry.c (everything else):
+
+ __mcpm_cpu_going_down() signals the transition of a CPU to the
+ CPU_GOING_DOWN state.
+
+ __mcpm_cpu_down() signals the transition of a CPU to the CPU_DOWN
+ state.
+
+ A CPU transitions to CPU_COMING_UP and then to CPU_UP via the
+ low-level power-up code in mcpm_head.S. This could
+ involve CPU-specific setup code, but in the current
+ implementation it does not.
+
+ __mcpm_outbound_enter_critical() and __mcpm_outbound_leave_critical()
+ handle transitions from CLUSTER_UP to CLUSTER_GOING_DOWN
+ and from there to CLUSTER_DOWN or back to CLUSTER_UP (in
+ the case of an aborted cluster power-down).
+
+ These functions are more complex than the __mcpm_cpu_*()
+ functions due to the extra inter-CPU coordination which
+ is needed for safe transitions at the cluster level.
+
+ A cluster transitions from CLUSTER_DOWN back to CLUSTER_UP via
+ the low-level power-up code in mcpm_head.S. This
+ typically involves platform-specific setup code,
+ provided by the platform-specific power_up_setup
+ function registered via mcpm_sync_init.
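+
+	As a simplified illustration (not the actual mcpm_entry.c code), an
+	outbound CPU strings these calls together roughly as follows; the
+	hypothetical platform_cluster_powerdown() stands in for the
+	platform-specific cluster teardown:
+
+		void example_power_down(unsigned int cpu, unsigned int cluster)
+		{
+			__mcpm_cpu_going_down(cpu, cluster);
+
+			if (__mcpm_outbound_enter_critical(cpu, cluster)) {
+				/* this CPU performs the cluster-level teardown */
+				platform_cluster_powerdown();
+				__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+			}
+
+			__mcpm_cpu_down(cpu, cluster);
+			/* power off, or wait for a wake-up event (e.g. WFI) */
+		}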
+
+Deep topologies:
+
+ As currently described and implemented, the algorithm does not
+ support CPU topologies involving more than two levels (i.e.,
+ clusters of clusters are not supported). The algorithm could be
+ extended by replicating the cluster-level states for the
+ additional topological levels, and modifying the transition
+ rules for the intermediate (non-outermost) cluster levels.
+
+
+Colophon
+--------
+
+Originally created and documented by Dave Martin for Linaro Limited, in
+collaboration with Nicolas Pitre and Achin Gupta.
+
+Copyright (C) 2012-2013 Linaro Limited
+Distributed under the terms of Version 2 of the GNU General Public
+License, as defined in linux/COPYING.
diff --git a/Documentation/arm/vlocks.txt b/Documentation/arm/vlocks.txt
new file mode 100644
index 00000000000..415960a9bab
--- /dev/null
+++ b/Documentation/arm/vlocks.txt
@@ -0,0 +1,211 @@
+vlocks for Bare-Metal Mutual Exclusion
+======================================
+
+Voting Locks, or "vlocks" provide a simple low-level mutual exclusion
+mechanism, with reasonable but minimal requirements on the memory
+system.
+
+These are intended to be used to coordinate critical activity among CPUs
+which are otherwise non-coherent, in situations where the hardware
+provides no other mechanism to support this and ordinary spinlocks
+cannot be used.
+
+
+vlocks make use of the atomicity provided by the memory system for
+writes to a single memory location. To arbitrate, every CPU "votes for
+itself", by storing a unique number to a common memory location. The
+final value seen in that memory location when all the votes have been
+cast identifies the winner.
+
+In order to make sure that the election produces an unambiguous result
+in finite time, a CPU will only enter the election in the first place if
+no winner has been chosen and the election does not appear to have
+started yet.
+
+
+Algorithm
+---------
+
+The easiest way to explain the vlocks algorithm is with some pseudo-code:
+
+
+ int currently_voting[NR_CPUS] = { 0, };
+ int last_vote = -1; /* no votes yet */
+
+ bool vlock_trylock(int this_cpu)
+ {
+ /* signal our desire to vote */
+ currently_voting[this_cpu] = 1;
+ if (last_vote != -1) {
+ /* someone already volunteered himself */
+ currently_voting[this_cpu] = 0;
+ return false; /* not ourself */
+ }
+
+ /* let's suggest ourself */
+ last_vote = this_cpu;
+ currently_voting[this_cpu] = 0;
+
+ /* then wait until everyone else is done voting */
+ for_each_cpu(i) {
+ while (currently_voting[i] != 0)
+ /* wait */;
+ }
+
+ /* result */
+ if (last_vote == this_cpu)
+ return true; /* we won */
+ return false;
+ }
+
+ void vlock_unlock(void)
+ {
+ last_vote = -1;
+ }
+
+
+The currently_voting[] array provides a way for the CPUs to determine
+whether an election is in progress, and plays a role analogous to the
+"entering" array in Lamport's bakery algorithm [1].
+
+However, once the election has started, the underlying memory system
+atomicity is used to pick the winner. This avoids the need for a static
+priority rule to act as a tie-breaker, or any counters which could
+overflow.
+
+As long as the last_vote variable is globally visible to all CPUs, it
+will contain only one value that won't change once every CPU has cleared
+its currently_voting flag.
+
+
+Features and limitations
+------------------------
+
+ * vlocks are not intended to be fair. In the contended case, it is the
+ _last_ CPU which attempts to get the lock which will be most likely
+ to win.
+
+ vlocks are therefore best suited to situations where it is necessary
+ to pick a unique winner, but it does not matter which CPU actually
+ wins.
+
+ * Like other similar mechanisms, vlocks will not scale well to a large
+ number of CPUs.
+
+ vlocks can be cascaded in a voting hierarchy to permit better scaling
+ if necessary, as in the following hypothetical example for 4096 CPUs:
+
+ /* first level: local election */
+ my_town = towns[(this_cpu >> 4) & 0xf];
+ I_won = vlock_trylock(my_town, this_cpu & 0xf);
+ if (I_won) {
+ /* we won the town election, let's go for the state */
+ my_state = states[(this_cpu >> 8) & 0xf];
+ I_won = vlock_trylock(my_state, this_cpu & 0xf);
+ if (I_won) {
+ /* and so on */
+ I_won = vlock_trylock(the_whole_country, this_cpu & 0xf);
+ if (I_won) {
+ /* ... */
+ }
+ vlock_unlock(the_whole_country);
+ }
+ vlock_unlock(my_state);
+ }
+ vlock_unlock(my_town);
+
+
+ARM implementation
+------------------
+
+The current ARM implementation [2] contains some optimisations beyond
+the basic algorithm:
+
+ * By packing the members of the currently_voting array close together,
+ we can read the whole array in one transaction (providing the number
+ of CPUs potentially contending the lock is small enough). This
+ reduces the number of round-trips required to external memory.
+
+ In the ARM implementation, this means that we can use a single load
+ and comparison:
+
+ LDR Rt, [Rn]
+ CMP Rt, #0
+
+ ...in place of code equivalent to:
+
+ LDRB Rt, [Rn]
+ CMP Rt, #0
+ LDRBEQ Rt, [Rn, #1]
+ CMPEQ Rt, #0
+ LDRBEQ Rt, [Rn, #2]
+ CMPEQ Rt, #0
+ LDRBEQ Rt, [Rn, #3]
+ CMPEQ Rt, #0
+
+ This cuts down on the fast-path latency, as well as potentially
+ reducing bus contention in contended cases.
+
+ The optimisation relies on the fact that the ARM memory system
+ guarantees coherency between overlapping memory accesses of
+ different sizes, similarly to many other architectures. Note that
+ we do not care which element of currently_voting appears in which
+ bits of Rt, so there is no need to worry about endianness in this
+ optimisation.
+
+ If there are too many CPUs to read the currently_voting array in
+ one transaction, then multiple transactions are still required. The
+ implementation uses a simple loop of word-sized loads for this
+ case. The number of transactions is still fewer than would be
+ required if bytes were loaded individually.
+
+
+ In principle, we could aggregate further by using LDRD or LDM, but
+ to keep the code simple this was not attempted in the initial
+ implementation.
+
+
+ * vlocks are currently only used to coordinate between CPUs which are
+ unable to enable their caches yet. This means that the
+ implementation removes many of the barriers which would be required
+ when executing the algorithm in cached memory.
+
+ The packing of the currently_voting array does not work with cached
+ memory unless all CPUs contending the lock are cache-coherent, due
+ to cache writebacks from one CPU clobbering values written by other
+ CPUs. (Though if all the CPUs are cache-coherent, you should
+ probably be using proper spinlocks instead anyway.)
+
+
+ * The "no votes yet" value used for the last_vote variable is 0 (not
+ -1 as in the pseudocode). This allows statically-allocated vlocks
+ to be implicitly initialised to an unlocked state simply by putting
+ them in .bss.
+
+ An offset is added to each CPU's ID for the purpose of setting this
+ variable, so that no CPU uses the value 0 for its ID.
+
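+   A sketch of the resulting fast path in C, with hypothetical names
+   (struct vlock, VLOCK_OWNER_NONE and VLOCK_VOTE_OFFSET are illustrative
+   and do not correspond to the actual definitions in vlock.S):
+
+	struct vlock {
+		int currently_voting[NR_CPUS];
+		int last_vote;
+	};
+
+	#define VLOCK_OWNER_NONE	0  /* .bss default: no votes cast yet  */
+	#define VLOCK_VOTE_OFFSET	1  /* keeps CPU 0 distinct from "none" */
+
+	bool vlock_trylock(struct vlock *v, int this_cpu)
+	{
+		int vote = this_cpu + VLOCK_VOTE_OFFSET;
+
+		v->currently_voting[this_cpu] = 1;
+		if (v->last_vote != VLOCK_OWNER_NONE) {
+			v->currently_voting[this_cpu] = 0;
+			return false;	/* election already decided or started */
+		}
+
+		v->last_vote = vote;
+		v->currently_voting[this_cpu] = 0;
+
+		wait_until_nobody_is_voting(v);	/* as in the pseudo-code above */
+		return v->last_vote == vote;	/* we won iff our vote survived */
+	}
+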
+
+Colophon
+--------
+
+Originally created and documented by Dave Martin for Linaro Limited, for
+use in ARM-based big.LITTLE platforms, with review and input gratefully
+received from Nicolas Pitre and Achin Gupta. Thanks to Nicolas for
+grabbing most of this text out of the relevant mail thread and writing
+up the pseudocode.
+
+Copyright (C) 2012-2013 Linaro Limited
+Distributed under the terms of Version 2 of the GNU General Public
+License, as defined in linux/COPYING.
+
+
+References
+----------
+
+[1] Lamport, L. "A New Solution of Dijkstra's Concurrent Programming
+ Problem", Communications of the ACM 17, 8 (August 1974), 453-455.
+
+ http://en.wikipedia.org/wiki/Lamport%27s_bakery_algorithm
+
+[2] linux/arch/arm/common/vlock.S, www.kernel.org.
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 56fb62b09fc..b428556197c 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
raid10 Various RAID10 inspired algorithms chosen by additional params
- RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
- RAID1E: Integrated Adjacent Stripe Mirroring
+ - RAID1E: Integrated Offset Stripe Mirroring
- and other similar RAID10 variants
Reference: Chapter 4 of
@@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
synchronisation state for each region.
[raid10_copies <# copies>]
- [raid10_format near]
+ [raid10_format <near|far|offset>]
These two options are used to alter the default layout of
a RAID10 configuration. The number of copies can be
- specified, but the default is 2. There are other variations
- to how the copies are laid down - the default and only current
- option is "near". Near copies are what most people think of
- with respect to mirroring. If these options are left
- unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
- are given, then the layouts for 2, 3 and 4 devices are:
+ specified, but the default is 2. There are also three
+ variations to how the copies are laid down - the default
+ is "near". Near copies are what most people think of with
+ respect to mirroring. If these options are left unspecified,
+ or 'raid10_copies 2' and/or 'raid10_format near' are given,
+ then the layouts for 2, 3 and 4 devices are:
2 drives 3 drives 4 drives
-------- ---------- --------------
A1 A1 A1 A1 A2 A1 A1 A2 A2
@@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
3-device layout is what might be called a 'RAID1E - Integrated
Adjacent Stripe Mirroring'.
+ If 'raid10_copies 2' and 'raid10_format far', then the layouts
+ for 2, 3 and 4 devices are:
+ 2 drives 3 drives 4 drives
+ -------- -------------- --------------------
+ A1 A2 A1 A2 A3 A1 A2 A3 A4
+ A3 A4 A4 A5 A6 A5 A6 A7 A8
+ A5 A6 A7 A8 A9 A9 A10 A11 A12
+ .. .. .. .. .. .. .. .. ..
+ A2 A1 A3 A1 A2 A2 A1 A4 A3
+ A4 A3 A6 A4 A5 A6 A5 A8 A7
+ A6 A5 A9 A7 A8 A10 A9 A12 A11
+ .. .. .. .. .. .. .. .. ..
+
+ If 'raid10_copies 2' and 'raid10_format offset', then the
+ layouts for 2, 3 and 4 devices are:
+ 2 drives 3 drives 4 drives
+ -------- ------------ -----------------
+ A1 A2 A1 A2 A3 A1 A2 A3 A4
+ A2 A1 A3 A1 A2 A2 A1 A4 A3
+ A3 A4 A4 A5 A6 A5 A6 A7 A8
+ A4 A3 A6 A4 A5 A6 A5 A8 A7
+ A5 A6 A7 A8 A9 A9 A10 A11 A12
+ A6 A5 A9 A7 A8 A10 A9 A12 A11
+ .. .. .. .. .. .. .. .. ..
+ Here we see layouts closely akin to 'RAID1E - Integrated
+ Offset Stripe Mirroring'.
+
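+ For illustration, a hypothetical table line requesting a 4-device
+ RAID10 array with 2 copies, a 32KiB (64-sector) chunk and the 'offset'
+ layout might look like this (the device names and length are examples
+ only):
+
+ 0 1960893648 raid raid10 5 64 raid10_copies 2 raid10_format offset \
+     4 - /dev/sda1 - /dev/sdb1 - /dev/sdc1 - /dev/sdd1
+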
<#raid_devs>: The number of devices composing the array.
Each device consists of two entries. The first is the device
containing the metadata (if any); the second is the one containing the
@@ -142,3 +170,5 @@ Version History
1.3.0 Added support for RAID 10
1.3.1 Allow device replacement/rebuild for RAID 10
1.3.2 Fix/improve redundancy checking for RAID10
+1.4.0 Non-functional change. Removes arg from mapping function.
+1.4.1 Add RAID10 "far" and "offset" algorithm support.
diff --git a/Documentation/devicetree/bindings/mfd/ab8500.txt b/Documentation/devicetree/bindings/mfd/ab8500.txt
index 13b707b7355..c3a14e0ad0a 100644
--- a/Documentation/devicetree/bindings/mfd/ab8500.txt
+++ b/Documentation/devicetree/bindings/mfd/ab8500.txt
@@ -13,9 +13,6 @@ Required parent device properties:
4 = active high level-sensitive
8 = active low level-sensitive
-Optional parent device properties:
-- reg : contains the PRCMU mailbox address for the AB8500 i2c port
-
The AB8500 consists of a large and varied group of sub-devices:
Device IRQ Names Supply Names Description
@@ -86,9 +83,8 @@ Non-standard child device properties:
- stericsson,amic2-bias-vamic1 : Analoge Mic wishes to use a non-standard Vamic
- stericsson,earpeice-cmv : Earpeice voltage (only: 950 | 1100 | 1270 | 1580)
-ab8500@5 {
+ab8500 {
compatible = "stericsson,ab8500";
- reg = <5>; /* mailbox 5 is i2c */
interrupts = <0 40 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/tty/serial/of-serial.txt b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
index 1e1145ca4f3..8f01cb190f2 100644
--- a/Documentation/devicetree/bindings/tty/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
@@ -11,6 +11,9 @@ Required properties:
- "nvidia,tegra20-uart"
- "nxp,lpc3220-uart"
- "ibm,qpace-nwp-serial"
+ - "altr,16550-FIFO32"
+ - "altr,16550-FIFO64"
+ - "altr,16550-FIFO128"
- "serial" if the port type is unknown.
- reg : offset and length of the register set for the device.
- interrupts : should contain uart interrupt.
diff --git a/Documentation/hwmon/adm1275 b/Documentation/hwmon/adm1275
index 2cfa2566712..15b4a20d506 100644
--- a/Documentation/hwmon/adm1275
+++ b/Documentation/hwmon/adm1275
@@ -15,7 +15,7 @@ Supported chips:
Addresses scanned: -
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/adt7410 b/Documentation/hwmon/adt7410
index 96004000dc2..58150c480e5 100644
--- a/Documentation/hwmon/adt7410
+++ b/Documentation/hwmon/adt7410
@@ -4,9 +4,14 @@ Kernel driver adt7410
Supported chips:
* Analog Devices ADT7410
Prefix: 'adt7410'
- Addresses scanned: I2C 0x48 - 0x4B
+ Addresses scanned: None
Datasheet: Publicly available at the Analog Devices website
http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
+ * Analog Devices ADT7420
+ Prefix: 'adt7420'
+ Addresses scanned: None
+ Datasheet: Publicly available at the Analog Devices website
+ http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf
Author: Hartmut Knaack <knaack.h@gmx.de>
@@ -27,6 +32,10 @@ value per second or even just get one sample on demand for power saving.
Besides, it can completely power down its ADC, if power management is
required.
+The ADT7420 is register compatible, the only differences being the package,
+a slightly narrower operating temperature range (-40°C to +150°C), and
+better accuracy (0.25°C instead of 0.50°C).
+
Configuration Notes
-------------------
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index 16507712123..868d74d6b77 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -49,7 +49,7 @@ Supported chips:
Addresses scanned: I2C 0x18 - 0x1f
Author:
- Guenter Roeck <guenter.roeck@ericsson.com>
+ Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/lineage-pem b/Documentation/hwmon/lineage-pem
index 2ba5ed12685..83b2ddc160c 100644
--- a/Documentation/hwmon/lineage-pem
+++ b/Documentation/hwmon/lineage-pem
@@ -8,7 +8,7 @@ Supported devices:
Documentation:
http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066
index a21db81c459..26025e419d3 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066
@@ -19,7 +19,7 @@ Supported chips:
Datasheet:
http://www.national.com/pf/LM/LM5066.html
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/ltc2978 b/Documentation/hwmon/ltc2978
index c365f9beb5d..e4d75c606c9 100644
--- a/Documentation/hwmon/ltc2978
+++ b/Documentation/hwmon/ltc2978
@@ -5,13 +5,13 @@ Supported chips:
* Linear Technology LTC2978
Prefix: 'ltc2978'
Addresses scanned: -
- Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
+ Datasheet: http://www.linear.com/product/ltc2978
* Linear Technology LTC3880
Prefix: 'ltc3880'
Addresses scanned: -
- Datasheet: http://cds.linear.com/docs/Datasheet/3880f.pdf
+ Datasheet: http://www.linear.com/product/ltc3880
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/ltc4261 b/Documentation/hwmon/ltc4261
index eba2e2c4b94..9378a75c613 100644
--- a/Documentation/hwmon/ltc4261
+++ b/Documentation/hwmon/ltc4261
@@ -8,7 +8,7 @@ Supported chips:
Datasheet:
http://cds.linear.com/docs/Datasheet/42612fb.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064
index f8b478076f6..d59cc7829be 100644
--- a/Documentation/hwmon/max16064
+++ b/Documentation/hwmon/max16064
@@ -7,7 +7,7 @@ Supported chips:
Addresses scanned: -
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/max16065 b/Documentation/hwmon/max16065
index c11f64a1f2a..208a29e4301 100644
--- a/Documentation/hwmon/max16065
+++ b/Documentation/hwmon/max16065
@@ -24,7 +24,7 @@ Supported chips:
http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index 47651ff341a..37cbf472a19 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -27,7 +27,7 @@ Supported chips:
Addresses scanned: -
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688
index fe849871df3..e78078638b9 100644
--- a/Documentation/hwmon/max8688
+++ b/Documentation/hwmon/max8688
@@ -7,7 +7,7 @@ Supported chips:
Addresses scanned: -
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index 3d3a0f97f96..cf756ed48ff 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -34,7 +34,7 @@ Supported chips:
Addresses scanned: -
Datasheet: n.a.
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/smm665 b/Documentation/hwmon/smm665
index 59e31614054..a341eeedab7 100644
--- a/Documentation/hwmon/smm665
+++ b/Documentation/hwmon/smm665
@@ -29,7 +29,7 @@ Supported chips:
http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Module Parameters
diff --git a/Documentation/hwmon/ucd9000 b/Documentation/hwmon/ucd9000
index 0df5f276505..805e33edb97 100644
--- a/Documentation/hwmon/ucd9000
+++ b/Documentation/hwmon/ucd9000
@@ -11,7 +11,7 @@ Supported chips:
http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/ucd9200 b/Documentation/hwmon/ucd9200
index fd7d07b1908..1e8060e631b 100644
--- a/Documentation/hwmon/ucd9200
+++ b/Documentation/hwmon/ucd9200
@@ -15,7 +15,7 @@ Supported chips:
http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
http://focus.ti.com/lit/ds/symlink/ucd9248.pdf
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100
index 3d924b6b59e..756b57c6b73 100644
--- a/Documentation/hwmon/zl6100
+++ b/Documentation/hwmon/zl6100
@@ -54,7 +54,7 @@ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index 3262b6e4d68..e544c7ff8cf 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -3,10 +3,26 @@ ALPS Touchpad Protocol
Introduction
------------
-
-Currently the ALPS touchpad driver supports four protocol versions in use by
-ALPS touchpads, called versions 1, 2, 3, and 4. Information about the various
-protocol versions is contained in the following sections.
+Currently the ALPS touchpad driver supports five protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, 4 and 5.
+
+Since roughly mid-2010 several new ALPS touchpads have been released and
+integrated into a variety of laptops and netbooks. These new touchpads
+have enough behavior differences that the alps_model_data definition
+table, describing the properties of the different versions, is no longer
+adequate. The design choices were either to re-define the alps_model_data
+table, at the risk of regressing existing devices, or to isolate the new
+devices outside of the alps_model_data table. The latter design choice
+was made. The new touchpad signatures are named "Rushmore",
+"Pinnacle", and "Dolphin", which you will see in the alps.c code.
+For the purposes of this document, this group of ALPS touchpads will
+generically be called "new ALPS touchpads".
+
+We experimented with probing the ACPI interface _HID (Hardware ID)/_CID
+(Compatibility ID) definition as a way to uniquely identify the
+different ALPS variants but there did not appear to be a 1:1 mapping.
+In fact, it appeared to be an m:n mapping between the _HID and actual
+hardware type.
Detection
---------
@@ -20,9 +36,13 @@ If the E6 report is successful, the touchpad model is identified using the "E7
report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
matched against known models in the alps_model_data_array.
-With protocol versions 3 and 4, the E7 report model signature is always
-73-02-64. To differentiate between these versions, the response from the
-"Enter Command Mode" sequence must be inspected as described below.
+For older touchpads supporting protocol versions 3 and 4, the E7 report
+model signature is always 73-02-64. To differentiate between these
+versions, the response from the "Enter Command Mode" sequence must be
+inspected as described below.
+
+The new ALPS touchpads have an E7 signature of 73-03-50 or 73-03-0A but
+seem to be better differentiated by the EC Command Mode response.
Command Mode
------------
@@ -47,6 +67,14 @@ address of the register being read, and the third contains the value of the
register. Registers are written by writing the value one nibble at a time
using the same encoding used for addresses.
+For the new ALPS touchpads, the EC command is used to enter command
+mode. Their response is significantly different and plays a larger role
+in determining the device's behavior. This code has been
+separated from the original alps_model_data table and put in the
+alps_identify function. For example, there seem to be two hardware init
+sequences for the "Dolphin" touchpads as determined by the second byte
+of the EC response.
+
Packet Format
-------------
@@ -187,3 +215,28 @@ There are several things worth noting here.
well.
So far no v4 devices with tracksticks have been encountered.
+
+ALPS Absolute Mode - Protocol Version 5
+---------------------------------------
+This is basically Protocol Version 3 but with different logic for packet
+decode. It uses the same alps_process_touchpad_packet_v3 call with a
+specialized decode_fields function pointer to correctly interpret the
+packets. This appears to only be used by the Dolphin devices.
+
+For single-touch, the 6-byte packet format is:
+
+ byte 0: 1 1 0 0 1 0 0 0
+ byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+ byte 2: 0 y6 y5 y4 y3 y2 y1 y0
+ byte 3: 0 M R L 1 m r l
+ byte 4: y10 y9 y8 y7 x10 x9 x8 x7
+ byte 5: 0 z6 z5 z4 z3 z2 z1 z0
+
+For multi-touch (mt), the format is:
+
+ byte 0: 1 1 1 n3 1 n2 n1 x24
+ byte 1: 1 y7 y6 y5 y4 y3 y2 y1
+ byte 2: ? x2 x1 y12 y11 y10 y9 y8
+ byte 3: 0 x23 x22 x21 x20 x19 x18 x17
+ byte 4: 0 x9 x8 x7 x6 x5 x4 x3
+ byte 5: 0 x16 x15 x14 x13 x12 x11 x10
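
To make the single-touch table above concrete, here is a small illustrative
decoder. It is only a sketch of the bit extraction implied by the table, not
the driver's actual decode_fields implementation: the structure and function
names are invented for this example, and the lowercase l/r/m bits of byte 3
are left undecoded.

   #include <stdint.h>

   struct alps_v5_st {
       unsigned int x, y, z;             /* absolute position and pressure */
       unsigned int left, right, middle; /* M/R/L button bits */
   };

   static void alps_v5_decode_st(const uint8_t p[6], struct alps_v5_st *st)
   {
       st->x = (p[1] & 0x7f) | ((p[4] & 0x0f) << 7);        /* x10..x0 */
       st->y = (p[2] & 0x7f) | (((p[4] >> 4) & 0x0f) << 7); /* y10..y0 */
       st->z = p[5] & 0x7f;                                 /* z6..z0  */
       st->left   = (p[3] >> 4) & 1;                        /* L bit   */
       st->right  = (p[3] >> 5) & 1;                        /* R bit   */
       st->middle = (p[3] >> 6) & 1;                        /* M bit   */
   }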
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index c0aab985bad..949d5dcdd9a 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
Proto [2 bytes]
Raw protocol(IP, IPv6, etc) frame.
+ 3.3 Multiqueue tuntap interface:
+
+ From version 3.8, Linux supports multiqueue tuntap, which can use multiple
+ file descriptors (queues) to parallelize packet sending and receiving. Device
+ allocation is the same as before; to create multiple queues, TUNSETIFF must
+ be called multiple times with the same device name and with the
+ IFF_MULTI_QUEUE flag set.
+
+ char *dev should be the name of the device, queues is the number of queues to
+ be created, and fds is used to store and return the created file descriptors
+ (queues) to the caller. Each file descriptor serves as the interface to one
+ queue and can be accessed from userspace.
+
+   #include <fcntl.h>
+   #include <string.h>
+   #include <sys/ioctl.h>
+   #include <unistd.h>
+   #include <linux/if.h>
+   #include <linux/if_tun.h>
+
+   int tun_alloc_mq(char *dev, int queues, int *fds)
+   {
+       struct ifreq ifr;
+       int fd, err, i;
+
+       if (!dev)
+           return -1;
+
+       memset(&ifr, 0, sizeof(ifr));
+       /* Flags: IFF_TUN   - TUN device (no Ethernet headers)
+        *        IFF_TAP   - TAP device
+        *
+        *        IFF_NO_PI - Do not provide packet information
+        *        IFF_MULTI_QUEUE - Create a queue of a multiqueue device
+        */
+       ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
+       strcpy(ifr.ifr_name, dev);
+
+       for (i = 0; i < queues; i++) {
+           /* Open the clone device once per queue ... */
+           if ((fd = open("/dev/net/tun", O_RDWR)) < 0) {
+               err = fd;
+               goto err;
+           }
+           /* ... and attach it to the same interface name. */
+           err = ioctl(fd, TUNSETIFF, (void *)&ifr);
+           if (err) {
+               close(fd);
+               goto err;
+           }
+           fds[i] = fd;
+       }
+
+       return 0;
+   err:
+       for (--i; i >= 0; i--)
+           close(fds[i]);
+       return err;
+   }
+
+ A new ioctl, TUNSETQUEUE, was introduced to enable or disable a queue.
+ Calling it with the IFF_DETACH_QUEUE flag disables the queue, and calling it
+ with the IFF_ATTACH_QUEUE flag enables it again. A queue is enabled by
+ default after it has been created through TUNSETIFF.
+
+ fd is the file descriptor (queue) that we want to enable or disable; when
+ enable is true we enable it, otherwise we disable it.
+
+   #include <string.h>
+   #include <sys/ioctl.h>
+   #include <linux/if.h>
+   #include <linux/if_tun.h>
+
+   int tun_set_queue(int fd, int enable)
+   {
+       struct ifreq ifr;
+
+       memset(&ifr, 0, sizeof(ifr));
+
+       if (enable)
+           ifr.ifr_flags = IFF_ATTACH_QUEUE;
+       else
+           ifr.ifr_flags = IFF_DETACH_QUEUE;
+
+       return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
+   }
+
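 A minimal usage sketch combining the two helpers above follows. The device
 name "tap0", the queue count of four, and the main() wrapper are arbitrary
 choices for illustration, and error handling is kept to a minimum; real code
 would do I/O on the queues between the detach and attach calls.

   #include <stdio.h>
   #include <stdlib.h>
   #include <linux/if.h>

   int tun_alloc_mq(char *dev, int queues, int *fds);
   int tun_set_queue(int fd, int enable);

   int main(void)
   {
       char dev[IFNAMSIZ] = "tap0";   /* hypothetical device name */
       int fds[4];

       if (tun_alloc_mq(dev, 4, fds) < 0) {
           perror("tun_alloc_mq");
           return EXIT_FAILURE;
       }

       /* Temporarily take queue 3 out of service ... */
       if (tun_set_queue(fds[3], 0) < 0)
           perror("TUNSETQUEUE detach");

       /* ... and put it back before resuming I/O on it. */
       if (tun_set_queue(fds[3], 1) < 0)
           perror("TUNSETQUEUE attach");

       return EXIT_SUCCESS;
   }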
Universal TUN/TAP device driver Frequently Asked Question.
1. What platforms are supported by TUN/TAP driver ?
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 3035d00757a..425c51d56ae 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -1,6 +1,5 @@
-*=============*
-* OPP Library *
-*=============*
+Operating Performance Points (OPP) Library
+==========================================
(C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated
@@ -16,15 +15,31 @@ Contents
1. Introduction
===============
+1.1 What is an Operating Performance Point (OPP)?
+
Complex SoCs of today consists of a multiple sub-modules working in conjunction.
In an operational system executing varied use cases, not all modules in the SoC
need to function at their highest performing frequency all the time. To
facilitate this, sub-modules in a SoC are grouped into domains, allowing some
-domains to run at lower voltage and frequency while other domains are loaded
-more. The set of discrete tuples consisting of frequency and voltage pairs that
+domains to run at lower voltage and frequency while other domains run at
+higher voltage/frequency pairs.
+
+The set of discrete tuples consisting of frequency and voltage pairs that
the device will support per domain are called Operating Performance Points or
OPPs.
+As an example:
+Let us consider an MPU device which supports the following:
+{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
+{1GHz at minimum voltage of 1.3V}
+
+We can represent these as three OPPs as the following {Hz, uV} tuples:
+{300000000, 1000000}
+{800000000, 1200000}
+{1000000000, 1300000}
+
+1.2 Operating Performance Points Library
+
OPP library provides a set of helper functions to organize and query the OPP
information. The library is located in drivers/base/power/opp.c and the header
is located in include/linux/opp.h. OPP library can be enabled by enabling
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index e8a6aa473ba..6e953564de0 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -170,5 +170,5 @@ Reminder: sizeof() result is of type size_t.
Thank you for your cooperation and attention.
-By Randy Dunlap <rdunlap@xenotime.net> and
+By Randy Dunlap <rdunlap@infradead.org> and
Andrew Murray <amurray@mpc-data.co.uk>
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 53d6a3c51d8..a372304aef1 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1873,7 +1873,7 @@ feature:
status\input | 0 | 1 | else |
--------------+------------+------------+------------+
- not allocated |(do nothing)| alloc+swap | EINVAL |
+ not allocated |(do nothing)| alloc+swap |(do nothing)|
--------------+------------+------------+------------+
allocated | free | swap | clear |
--------------+------------+------------+------------+
diff --git a/MAINTAINERS b/MAINTAINERS
index e95b1e944eb..50b4d735f96 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)
-----------------------------------
-3C505 NETWORK DRIVER
-M: Philip Blundell <philb@gnu.org>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/i825xx/3c505*
-
3C59X NETWORK DRIVER
M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
L: netdev@vger.kernel.org
@@ -2361,12 +2355,6 @@ W: http://www.arm.linux.org.uk/
S: Maintained
F: drivers/video/cyber2000fb.*
-CYCLADES 2X SYNC CARD DRIVER
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-W: http://oops.ghostprotocols.net:81/blog
-S: Maintained
-F: drivers/net/wan/cycx*
-
CYCLADES ASYNC MUX DRIVER
W: http://www.cyclades.com/
S: Orphan
@@ -3067,12 +3055,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F: drivers/video/s1d13xxxfb.c
F: include/video/s1d13xxxfb.h
-ETHEREXPRESS-16 NETWORK DRIVER
-M: Philip Blundell <philb@gnu.org>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/i825xx/eexpress.*
-
ETHERNET BRIDGE
M: Stephen Hemminger <stephen@networkplumber.org>
L: bridge@lists.linux-foundation.org
@@ -4023,6 +4005,22 @@ M: Stanislaw Gruszka <stf_xl@wp.pl>
S: Maintained
F: drivers/usb/atm/ueagle-atm.c
+INA209 HARDWARE MONITOR DRIVER
+M: Guenter Roeck <linux@roeck-us.net>
+L: lm-sensors@lm-sensors.org
+S: Maintained
+F: Documentation/hwmon/ina209
+F: Documentation/devicetree/bindings/i2c/ina209.txt
+F: drivers/hwmon/ina209.c
+
+INA2XX HARDWARE MONITOR DRIVER
+M: Guenter Roeck <linux@roeck-us.net>
+L: lm-sensors@lm-sensors.org
+S: Maintained
+F: Documentation/hwmon/ina2xx
+F: drivers/hwmon/ina2xx.c
+F: include/linux/platform_data/ina2xx.h
+
INDUSTRY PACK SUBSYSTEM (IPACK)
M: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
M: Jens Taprogge <jens.taprogge@taprogge.org>
@@ -5116,6 +5114,15 @@ S: Maintained
F: Documentation/hwmon/max6650
F: drivers/hwmon/max6650.c
+MAX6697 HARDWARE MONITOR DRIVER
+M: Guenter Roeck <linux@roeck-us.net>
+L: lm-sensors@lm-sensors.org
+S: Maintained
+F: Documentation/hwmon/max6697
+F: Documentation/devicetree/bindings/i2c/max6697.txt
+F: drivers/hwmon/max6697.c
+F: include/linux/platform_data/max6697.h
+
MAXIRADIO FM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -6430,6 +6437,8 @@ F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
+M: Rajesh Borundia <rajesh.borundia@qlogic.com>
+M: Shahed Shaikh <shahed.shaikh@qlogic.com>
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
M: linux-driver@qlogic.com
diff --git a/Makefile b/Makefile
index 5bd9f7700eb..22113a77f8e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
NAME = Unicycling Gorilla
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 5a1779c9394..1455579791e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -319,13 +319,6 @@ config ARCH_WANT_OLD_COMPAT_IPC
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
bool
-config HAVE_VIRT_TO_BUS
- bool
- help
- An architecture should select this if it implements the
- deprecated interface virt_to_bus(). All new architectures
- should probably not select this.
-
config HAVE_ARCH_SECCOMP_FILTER
bool
help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5833aa44148..8a33ba01301 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,7 +9,7 @@ config ALPHA
select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
diff --git a/arch/alpha/boot/head.S b/arch/alpha/boot/head.S
index b06812bcac8..8efb26686d4 100644
--- a/arch/alpha/boot/head.S
+++ b/arch/alpha/boot/head.S
@@ -4,6 +4,7 @@
* initial bootloader stuff..
*/
+#include <asm/pal.h>
.set noreorder
.globl __start
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index dedf02b6f32..7caf8ddea04 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,7 +49,7 @@ config ARM
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select KTIME_SCALAR
select PERF_USE_VMALLOC
select RTC_LIB
@@ -556,7 +556,6 @@ config ARCH_IXP4XX
config ARCH_DOVE
bool "Marvell Dove"
select ARCH_REQUIRE_GPIOLIB
- select COMMON_CLK_DOVE
select CPU_V7
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
@@ -1600,6 +1599,14 @@ config HAVE_ARM_TWD
help
This options enables support for the ARM timer and watchdog unit
+config MCPM
+ bool "Multi-Cluster Power Management"
+ depends on CPU_V7 && SMP
+ help
+ This option provides the common power management infrastructure
+ for (multi-)cluster based systems, such as big.LITTLE based
+ systems.
+
choice
prompt "Memory split"
default VMSPLIT_3G
@@ -1657,13 +1664,16 @@ config LOCAL_TIMERS
accounting to be spread across the timer interval, preventing a
"thundering herd" at every timer tick.
+# The GPIO numbers here must be sorted in descending order. In the case of
+# a multiplatform kernel, we just want the highest value required by the
+# selected platforms.
config ARCH_NR_GPIO
int
default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
- default 355 if ARCH_U8500
- default 264 if MACH_H4700
default 512 if SOC_OMAP5
+ default 355 if ARCH_U8500
default 288 if ARCH_VT8500 || ARCH_SUNXI
+ default 264 if MACH_H4700
default 0
help
Maximum number of GPIOs in the system.
@@ -1888,8 +1898,9 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM (EXPERIMENTAL)"
- depends on ARM && OF
+ depends on ARM && AEABI && OF
depends on CPU_V7 && !CPU_V6
+ depends on !GENERIC_ATOMIC64
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index acddddac7ee..ecfcdba2d17 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -492,7 +492,7 @@ config DEBUG_IMX_UART_PORT
DEBUG_IMX31_UART || \
DEBUG_IMX35_UART || \
DEBUG_IMX51_UART || \
- DEBUG_IMX50_IMX53_UART || \
+ DEBUG_IMX53_UART || \
DEBUG_IMX6Q_UART
default 1
help
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 71768b8a1ab..84aa2caf07e 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -115,4 +115,4 @@ i:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image System.map "$(INSTALL_PATH)"
-subdir- := bootp compressed
+subdir- := bootp compressed dts
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 5cad8a6dadb..afed28e37ea 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
-ccflags-y := -fpic -fno-builtin -I$(obj)
+ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
index f8e4855bc9a..070bba4f258 100644
--- a/arch/arm/boot/dts/armada-370-rd.dts
+++ b/arch/arm/boot/dts/armada-370-rd.dts
@@ -64,5 +64,13 @@
status = "okay";
/* No CD or WP GPIOs */
};
+
+ usb@d0050000 {
+ status = "okay";
+ };
+
+ usb@d0051000 {
+ status = "okay";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 6f1acc75e15..5b708208b60 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -31,7 +31,6 @@
mpic: interrupt-controller@d0020000 {
compatible = "marvell,mpic";
#interrupt-cells = <1>;
- #address-cells = <1>;
#size-cells = <1>;
interrupt-controller;
};
@@ -54,7 +53,7 @@
reg = <0xd0012000 0x100>;
reg-shift = <2>;
interrupts = <41>;
- reg-io-width = <4>;
+ reg-io-width = <1>;
status = "disabled";
};
serial@d0012100 {
@@ -62,7 +61,7 @@
reg = <0xd0012100 0x100>;
reg-shift = <2>;
interrupts = <42>;
- reg-io-width = <4>;
+ reg-io-width = <1>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 1443949c165..ca00d8326c8 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -46,7 +46,7 @@
reg = <0xd0012200 0x100>;
reg-shift = <2>;
interrupts = <43>;
- reg-io-width = <4>;
+ reg-io-width = <1>;
status = "disabled";
};
serial@d0012300 {
@@ -54,7 +54,7 @@
reg = <0xd0012300 0x100>;
reg-shift = <2>;
interrupts = <44>;
- reg-io-width = <4>;
+ reg-io-width = <1>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 4bf2a8774aa..7e0481e2441 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -105,7 +105,7 @@
compatible = "fixed-clock";
reg = <1>;
#clock-cells = <0>;
- clock-frequency = <150000000>;
+ clock-frequency = <250000000>;
};
};
};
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 69140ba99f4..9de93096601 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -319,9 +319,8 @@
};
};
- ab8500@5 {
+ ab8500 {
compatible = "stericsson,ab8500";
- reg = <5>; /* mailbox 5 is i2c */
interrupt-parent = <&intc>;
interrupts = <0 40 0x4>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 67dbe20868a..f7509cafc37 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -197,6 +197,11 @@
status = "disabled";
};
+ rtc@d8500 {
+ compatible = "marvell,orion-rtc";
+ reg = <0xd8500 0x20>;
+ };
+
crypto: crypto@30000 {
compatible = "marvell,orion-crypto";
reg = <0x30000 0x10000>,
diff --git a/arch/arm/boot/dts/href.dtsi b/arch/arm/boot/dts/href.dtsi
index 592fb9dc35b..379128eb9d9 100644
--- a/arch/arm/boot/dts/href.dtsi
+++ b/arch/arm/boot/dts/href.dtsi
@@ -221,7 +221,7 @@
};
};
- ab8500@5 {
+ ab8500 {
ab8500-regulators {
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/hrefv60plus.dts b/arch/arm/boot/dts/hrefv60plus.dts
index 55f4191a626..2b587a74b81 100644
--- a/arch/arm/boot/dts/hrefv60plus.dts
+++ b/arch/arm/boot/dts/hrefv60plus.dts
@@ -158,7 +158,7 @@
};
};
- ab8500@5 {
+ ab8500 {
ab8500-regulators {
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index e54fffd4836..468c0a1d48d 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -42,10 +42,9 @@
fsl,pins = <689 0x10000 /* DISP1_DRDY */
482 0x10000 /* DISP1_HSYNC */
489 0x10000 /* DISP1_VSYNC */
- 684 0x10000 /* DISP1_DAT_0 */
515 0x10000 /* DISP1_DAT_22 */
523 0x10000 /* DISP1_DAT_23 */
- 543 0x10000 /* DISP1_DAT_21 */
+ 545 0x10000 /* DISP1_DAT_21 */
553 0x10000 /* DISP1_DAT_20 */
558 0x10000 /* DISP1_DAT_19 */
564 0x10000 /* DISP1_DAT_18 */
diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts
index 5bb0bf39d3b..c9c44b2f62d 100644
--- a/arch/arm/boot/dts/kirkwood-dns320.dts
+++ b/arch/arm/boot/dts/kirkwood-dns320.dts
@@ -42,12 +42,10 @@
ocp@f1000000 {
serial@12000 {
- clock-frequency = <166666667>;
status = "okay";
};
serial@12100 {
- clock-frequency = <166666667>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts
index d430713ea9b..e4e4930dc5c 100644
--- a/arch/arm/boot/dts/kirkwood-dns325.dts
+++ b/arch/arm/boot/dts/kirkwood-dns325.dts
@@ -50,7 +50,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts
index 2e3dd34e21a..0196cf6b0ef 100644
--- a/arch/arm/boot/dts/kirkwood-dockstar.dts
+++ b/arch/arm/boot/dts/kirkwood-dockstar.dts
@@ -37,7 +37,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts
index ef2d8c70570..289e51d8637 100644
--- a/arch/arm/boot/dts/kirkwood-dreamplug.dts
+++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts
@@ -38,7 +38,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
index 1b133e0c566..bd83b8fc7c8 100644
--- a/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -73,7 +73,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index 71902da33d6..5335b1aa860 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -51,7 +51,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 504f16be8b5..12ccf74ac3c 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -78,7 +78,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 6cae4599c4b..93c3afbef9e 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -115,7 +115,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
index 8db3123ac80..5bbd0542cdd 100644
--- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
+++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
@@ -34,7 +34,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts
index 9510c9ea666..9f55d95f35f 100644
--- a/arch/arm/boot/dts/kirkwood-lschlv2.dts
+++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts
@@ -13,7 +13,6 @@
ocp@f1000000 {
serial@12000 {
- clock-frequency = <166666667>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts
index 739019c4cba..5c84c118ed8 100644
--- a/arch/arm/boot/dts/kirkwood-lsxhl.dts
+++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts
@@ -13,7 +13,6 @@
ocp@f1000000 {
serial@12000 {
- clock-frequency = <200000000>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts
index 662dfd81b1c..758824118a9 100644
--- a/arch/arm/boot/dts/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts
@@ -90,7 +90,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
index e8e7ecef165..6affd924fe1 100644
--- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
@@ -23,7 +23,6 @@
};
serial@12000 {
- clock-frequency = <166666667>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts
index 3a178cf708d..a7412b937a8 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa310.dts
@@ -117,7 +117,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index ede7fe0d7a8..d27f7245f8e 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -18,12 +18,10 @@
ocp@f1000000 {
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
serial@12100 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts
index 842ff95d60d..66eb45b00b2 100644
--- a/arch/arm/boot/dts/kirkwood-topkick.dts
+++ b/arch/arm/boot/dts/kirkwood-topkick.dts
@@ -108,7 +108,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 2c738d9dc82..fada7e6d24d 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -38,6 +38,7 @@
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <35>, <36>, <37>, <38>;
+ clocks = <&gate_clk 7>;
};
gpio1: gpio@10140 {
@@ -49,6 +50,7 @@
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <39>, <40>, <41>;
+ clocks = <&gate_clk 7>;
};
serial@12000 {
@@ -57,7 +59,6 @@
reg-shift = <2>;
interrupts = <33>;
clocks = <&gate_clk 7>;
- /* set clock-frequency in board dts */
status = "disabled";
};
@@ -67,7 +68,6 @@
reg-shift = <2>;
interrupts = <34>;
clocks = <&gate_clk 7>;
- /* set clock-frequency in board dts */
status = "disabled";
};
@@ -75,6 +75,7 @@
compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc";
reg = <0x10300 0x20>;
interrupts = <53>;
+ clocks = <&gate_clk 7>;
};
spi@10600 {
diff --git a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
index 5a3a58b7e18..0077fc8510b 100644
--- a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
+++ b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
@@ -11,7 +11,7 @@
/ {
model = "LaCie Ethernet Disk mini V2";
- compatible = "lacie,ethernet-disk-mini-v2", "marvell-orion5x-88f5182", "marvell,orion5x";
+ compatible = "lacie,ethernet-disk-mini-v2", "marvell,orion5x-88f5182", "marvell,orion5x";
memory {
reg = <0x00000000 0x4000000>; /* 64 MB */
diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts
index 27f31a5fa49..d3ec32f6b79 100644
--- a/arch/arm/boot/dts/snowball.dts
+++ b/arch/arm/boot/dts/snowball.dts
@@ -298,7 +298,7 @@
};
};
- ab8500@5 {
+ ab8500 {
ab8500-regulators {
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 936d2306e7e..7e8769bd597 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -75,6 +75,9 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0xffe01000 0x1000>;
interrupts = <0 180 4>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 9a428931d04..48d00a099ce 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -118,6 +118,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x50040600 0x20>;
interrupts = <1 13 0x304>;
+ clocks = <&tegra_car 132>;
};
intc: interrupt-controller {
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 767803e1fd5..9d87a3ffe99 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -119,6 +119,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x50040600 0x20>;
interrupts = <1 13 0xf04>;
+ clocks = <&tegra_car 214>;
};
intc: interrupt-controller {
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index dc8dd0de5c0..48c0ee57817 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
new file mode 100644
index 00000000000..370236dd1a0
--- /dev/null
+++ b/arch/arm/common/mcpm_entry.c
@@ -0,0 +1,263 @@
+/*
+ * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+
+#include <asm/mcpm.h>
+#include <asm/cacheflush.h>
+#include <asm/idmap.h>
+#include <asm/cputype.h>
+
+extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
+{
+ unsigned long val = ptr ? virt_to_phys(ptr) : 0;
+ mcpm_entry_vectors[cluster][cpu] = val;
+ sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
+}
+
+static const struct mcpm_platform_ops *platform_ops;
+
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
+{
+ if (platform_ops)
+ return -EBUSY;
+ platform_ops = ops;
+ return 0;
+}
+
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
+{
+ if (!platform_ops)
+ return -EUNATCH; /* try not to shadow power_up errors */
+ might_sleep();
+ return platform_ops->power_up(cpu, cluster);
+}
+
+typedef void (*phys_reset_t)(unsigned long);
+
+void mcpm_cpu_power_down(void)
+{
+ phys_reset_t phys_reset;
+
+ BUG_ON(!platform_ops);
+ BUG_ON(!irqs_disabled());
+
+ /*
+ * Do this before calling into the power_down method,
+ * as it might not always be safe to do afterwards.
+ */
+ setup_mm_for_reboot();
+
+ platform_ops->power_down();
+
+ /*
+ * It is possible for a power_up request to happen concurrently
+ * with a power_down request for the same CPU. In this case the
+ * power_down method might not be able to actually enter a
+ * powered down state with the WFI instruction if the power_up
+ * method has removed the required reset condition. The
+ * power_down method is then allowed to return. We must perform
+ * a re-entry in the kernel as if the power_up method just had
+ * deasserted reset on the CPU.
+ *
+ * To simplify race issues, the platform specific implementation
+ * must accommodate the possibility of unordered calls to
+ * power_down and power_up with a usage count. Therefore, if a
+ * call to power_up is issued for a CPU that is not down, then
+ * the next call to power_down must not attempt a full shutdown
+ * but only do the minimum (normally disabling L1 cache and CPU
+ * coherency) and return just as if a concurrent power_up request
+ * had happened as described above.
+ */
+
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+
+ /* should never get here */
+ BUG();
+}
+
+void mcpm_cpu_suspend(u64 expected_residency)
+{
+ phys_reset_t phys_reset;
+
+ BUG_ON(!platform_ops);
+ BUG_ON(!irqs_disabled());
+
+ /* Very similar to mcpm_cpu_power_down() */
+ setup_mm_for_reboot();
+ platform_ops->suspend(expected_residency);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+ BUG();
+}
+
+int mcpm_cpu_powered_up(void)
+{
+ if (!platform_ops)
+ return -EUNATCH;
+ if (platform_ops->powered_up)
+ platform_ops->powered_up();
+ return 0;
+}
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ * This must be called at the point of committing to teardown of a CPU.
+ * The CPU cache (SCTLR.C bit) is expected to still be active.
+ */
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ * cluster can be torn down without disrupting this CPU.
+ * To avoid deadlocks, this must be called before a CPU is powered down.
+ * The CPU cache (SCTLR.C bit) is expected to be off.
+ * However L2 cache might or might not be active.
+ */
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+ dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ * CLUSTER_UP: no destructive teardown was done and the cluster has been
+ * restored to the previous state (CPU cache still active); or
+ * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ * (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cluster = state;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+ dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete. CPU cache expected to be active.
+ *
+ * Returns:
+ * false: the critical section was not entered because an inbound CPU was
+ * observed, or the cluster is already being set up;
+ * true: the critical section was entered: it is now safe to tear down the
+ * cluster.
+ */
+bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int i;
+ struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+ /* Warn inbound CPUs that the cluster is being torn down: */
+ c->cluster = CLUSTER_GOING_DOWN;
+ sync_cache_w(&c->cluster);
+
+ /* Back out if the inbound cluster is already in the critical region: */
+ sync_cache_r(&c->inbound);
+ if (c->inbound == INBOUND_COMING_UP)
+ goto abort;
+
+ /*
+ * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+ * teardown is complete on each CPU before tearing down the cluster.
+ *
+ * If any CPU has been woken up again from the DOWN state, then we
+ * shouldn't be taking the cluster down at all: abort in that case.
+ */
+ sync_cache_r(&c->cpus);
+ for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+ int cpustate;
+
+ if (i == cpu)
+ continue;
+
+ while (1) {
+ cpustate = c->cpus[i].cpu;
+ if (cpustate != CPU_GOING_DOWN)
+ break;
+
+ wfe();
+ sync_cache_r(&c->cpus[i].cpu);
+ }
+
+ switch (cpustate) {
+ case CPU_DOWN:
+ continue;
+
+ default:
+ goto abort;
+ }
+ }
+
+ return true;
+
+abort:
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+ return false;
+}
+
+int __mcpm_cluster_state(unsigned int cluster)
+{
+ sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+ return mcpm_sync.clusters[cluster].cluster;
+}
+
+extern unsigned long mcpm_power_up_setup_phys;
+
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level))
+{
+ unsigned int i, j, mpidr, this_cluster;
+
+ BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
+ BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));
+
+ /*
+ * Set initial CPU and cluster states.
+ * Only one cluster is assumed to be active at this point.
+ */
+ for (i = 0; i < MAX_NR_CLUSTERS; i++) {
+ mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
+ mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
+ for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
+ mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
+ }
+ mpidr = read_cpuid_mpidr();
+ this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ for_each_online_cpu(i)
+ mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+ mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
+ sync_cache_w(&mcpm_sync);
+
+ if (power_up_setup) {
+ mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
+ sync_cache_w(&mcpm_power_up_setup_phys);
+ }
+
+ return 0;
+}
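
To show how the API above is meant to be consumed, here is a hedged sketch of
a platform back-end hooking into MCPM. Only mcpm_platform_register(),
mcpm_sync_init() and mcpm_smp_set_ops() come from this code; the my_pm_*
functions, the early_initcall placement, and the omission of the .suspend and
.powered_up methods are illustrative assumptions, not a reference
implementation.

   #include <linux/init.h>
   #include <asm/mcpm.h>

   /* Hypothetical low-level helpers provided by the platform. */
   static int my_pm_power_up(unsigned int cpu, unsigned int cluster)
   {
       /* Enable power / deassert reset for this CPU in this cluster. */
       return 0;
   }

   static void my_pm_power_down(void)
   {
       /* Disable caches and coherency for this CPU, then enter WFI. */
   }

   static const struct mcpm_platform_ops my_pm_power_ops = {
       .power_up   = my_pm_power_up,
       .power_down = my_pm_power_down,
       /* .suspend and .powered_up omitted for brevity */
   };

   /* Cluster-level setup that runs with the MMU off, normally in assembly. */
   extern void my_power_up_setup(unsigned int affinity_level);

   static int __init my_pm_init(void)
   {
       int ret = mcpm_platform_register(&my_pm_power_ops);

       if (!ret)
           ret = mcpm_sync_init(my_power_up_setup);
       if (!ret)
           mcpm_smp_set_ops();   /* defined in mcpm_platsmp.c below */
       return ret;
   }
   early_initcall(my_pm_init);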
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
new file mode 100644
index 00000000000..8178705c4b2
--- /dev/null
+++ b/arch/arm/common/mcpm_head.S
@@ -0,0 +1,219 @@
+/*
+ * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Refer to Documentation/arm/cluster-pm-race-avoidance.txt
+ * for details of the synchronisation algorithms used here.
+ */
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+#include "vlock.h"
+
+.if MCPM_SYNC_CLUSTER_CPUS
+.error "cpus must be the first member of struct mcpm_sync_struct"
+.endif
+
+ .macro pr_dbg string
+#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
+ b 1901f
+1902: .asciz "CPU"
+1903: .asciz " cluster"
+1904: .asciz ": \string"
+ .align
+1901: adr r0, 1902b
+ bl printascii
+ mov r0, r9
+ bl printhex8
+ adr r0, 1903b
+ bl printascii
+ mov r0, r10
+ bl printhex8
+ adr r0, 1904b
+ bl printascii
+#endif
+ .endm
+
+ .arm
+ .align
+
+ENTRY(mcpm_entry_point)
+
+ THUMB( adr r12, BSYM(1f) )
+ THUMB( bx r12 )
+ THUMB( .thumb )
+1:
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ubfx r9, r0, #0, #8 @ r9 = cpu
+ ubfx r10, r0, #8, #8 @ r10 = cluster
+ mov r3, #MAX_CPUS_PER_CLUSTER
+ mla r4, r3, r10, r9 @ r4 = canonical CPU index
+ cmp r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
+ blo 2f
+
+ /* We didn't expect this CPU. Try to cheaply make it quiet. */
+1: wfi
+ wfe
+ b 1b
+
+2: pr_dbg "kernel mcpm_entry_point\n"
+
+ /*
+ * MMU is off so we need to get to various variables in a
+ * position independent way.
+ */
+ adr r5, 3f
+ ldmia r5, {r6, r7, r8, r11}
+ add r6, r5, r6 @ r6 = mcpm_entry_vectors
+ ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
+ add r8, r5, r8 @ r8 = mcpm_sync
+ add r11, r5, r11 @ r11 = first_man_locks
+
+ mov r0, #MCPM_SYNC_CLUSTER_SIZE
+ mla r8, r0, r10, r8 @ r8 = sync cluster base
+
+ @ Signal that this CPU is coming UP:
+ mov r0, #CPU_COMING_UP
+ mov r5, #MCPM_SYNC_CPU_SIZE
+ mla r5, r9, r5, r8 @ r5 = sync cpu address
+ strb r0, [r5]
+
+ @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
+ @ state, because there is at least one active CPU (this CPU).
+
+ mov r0, #VLOCK_SIZE
+ mla r11, r0, r10, r11 @ r11 = cluster first man lock
+ mov r0, r11
+ mov r1, r9 @ cpu
+ bl vlock_trylock @ implies DMB
+
+ cmp r0, #0 @ failed to get the lock?
+ bne mcpm_setup_wait @ wait for cluster setup if so
+
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_UP @ cluster already up?
+ bne mcpm_setup @ if not, set up the cluster
+
+ @ Otherwise, release the first man lock and skip setup:
+ mov r0, r11
+ bl vlock_unlock
+ b mcpm_setup_complete
+
+mcpm_setup:
+ @ Control dependency implies strb not observable before previous ldrb.
+
+ @ Signal that the cluster is being brought up:
+ mov r0, #INBOUND_COMING_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+ dmb
+
+ @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
+ @ point onwards will observe INBOUND_COMING_UP and abort.
+
+ @ Wait for any previously-pending cluster teardown operations to abort
+ @ or complete:
+mcpm_teardown_wait:
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_GOING_DOWN
+ bne first_man_setup
+ wfe
+ b mcpm_teardown_wait
+
+first_man_setup:
+ dmb
+
+ @ If the outbound gave up before teardown started, skip cluster setup:
+
+ cmp r0, #CLUSTER_UP
+ beq mcpm_setup_leave
+
+ @ power_up_setup is now responsible for setting up the cluster:
+
+ cmp r7, #0
+ mov r0, #1 @ second (cluster) affinity level
+ blxne r7 @ Call power_up_setup if defined
+ dmb
+
+ mov r0, #CLUSTER_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ dmb
+
+mcpm_setup_leave:
+ @ Leave the cluster setup critical section:
+
+ mov r0, #INBOUND_NOT_COMING_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+ dsb
+ sev
+
+ mov r0, r11
+ bl vlock_unlock @ implies DMB
+ b mcpm_setup_complete
+
+ @ In the contended case, non-first men wait here for cluster setup
+ @ to complete:
+mcpm_setup_wait:
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_UP
+ wfene
+ bne mcpm_setup_wait
+ dmb
+
+mcpm_setup_complete:
+ @ If a platform-specific CPU setup hook is needed, it is
+ @ called from here.
+
+ cmp r7, #0
+ mov r0, #0 @ first (CPU) affinity level
+ blxne r7 @ Call power_up_setup if defined
+ dmb
+
+ @ Mark the CPU as up:
+
+ mov r0, #CPU_UP
+ strb r0, [r5]
+
+ @ Observability order of CPU_UP and opening of the gate does not matter.
+
+mcpm_entry_gated:
+ ldr r5, [r6, r4, lsl #2] @ r5 = CPU entry vector
+ cmp r5, #0
+ wfeeq
+ beq mcpm_entry_gated
+ dmb
+
+ pr_dbg "released\n"
+ bx r5
+
+ .align 2
+
+3: .word mcpm_entry_vectors - .
+ .word mcpm_power_up_setup_phys - 3b
+ .word mcpm_sync - 3b
+ .word first_man_locks - 3b
+
+ENDPROC(mcpm_entry_point)
+
+ .bss
+
+ .align CACHE_WRITEBACK_ORDER
+ .type first_man_locks, #object
+first_man_locks:
+ .space VLOCK_SIZE * MAX_NR_CLUSTERS
+ .align CACHE_WRITEBACK_ORDER
+
+ .type mcpm_entry_vectors, #object
+ENTRY(mcpm_entry_vectors)
+ .space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
+ .type mcpm_power_up_setup_phys, #object
+ENTRY(mcpm_power_up_setup_phys)
+ .space 4 @ set by mcpm_sync_init()
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
new file mode 100644
index 00000000000..52b88d81b7b
--- /dev/null
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -0,0 +1,92 @@
+/*
+ * linux/arch/arm/mach-vexpress/mcpm_platsmp.c
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Code to handle secondary CPU bringup and hotplug for the cluster power API.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/mcpm.h>
+#include <asm/smp.h>
+#include <asm/smp_plat.h>
+
+static void __init simple_smp_init_cpus(void)
+{
+}
+
+static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned int mpidr, pcpu, pcluster, ret;
+ extern void secondary_startup(void);
+
+ mpidr = cpu_logical_map(cpu);
+ pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
+ __func__, cpu, pcpu, pcluster);
+
+ mcpm_set_entry_vector(pcpu, pcluster, NULL);
+ ret = mcpm_cpu_power_up(pcpu, pcluster);
+ if (ret)
+ return ret;
+ mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+ dsb_sev();
+ return 0;
+}
+
+static void __cpuinit mcpm_secondary_init(unsigned int cpu)
+{
+ mcpm_cpu_powered_up();
+ gic_secondary_init(0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int mcpm_cpu_disable(unsigned int cpu)
+{
+ /*
+ * We assume all CPUs may be shut down.
+ * This would be the hook to use for eventual Secure
+ * OS migration requests as described in the PSCI spec.
+ */
+ return 0;
+}
+
+static void mcpm_cpu_die(unsigned int cpu)
+{
+ unsigned int mpidr, pcpu, pcluster;
+ mpidr = read_cpuid_mpidr();
+ pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ mcpm_set_entry_vector(pcpu, pcluster, NULL);
+ mcpm_cpu_power_down();
+}
+
+#endif
+
+static struct smp_operations __initdata mcpm_smp_ops = {
+ .smp_init_cpus = simple_smp_init_cpus,
+ .smp_boot_secondary = mcpm_boot_secondary,
+ .smp_secondary_init = mcpm_secondary_init,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = mcpm_cpu_disable,
+ .cpu_die = mcpm_cpu_die,
+#endif
+};
+
+void __init mcpm_smp_set_ops(void)
+{
+ smp_set_ops(&mcpm_smp_ops);
+}
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
new file mode 100644
index 00000000000..ff198583f68
--- /dev/null
+++ b/arch/arm/common/vlock.S
@@ -0,0 +1,108 @@
+/*
+ * vlock.S - simple voting lock implementation for ARM
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * This algorithm is described in more detail in
+ * Documentation/arm/vlocks.txt.
+ */
+
+#include <linux/linkage.h>
+#include "vlock.h"
+
+/* Select different code if voting flags can fit in a single word. */
+#if VLOCK_VOTING_SIZE > 4
+#define FEW(x...)
+#define MANY(x...) x
+#else
+#define FEW(x...) x
+#define MANY(x...)
+#endif
+
+@ voting lock for first-man coordination
+
+.macro voting_begin rbase:req, rcpu:req, rscratch:req
+ mov \rscratch, #1
+ strb \rscratch, [\rbase, \rcpu]
+ dmb
+.endm
+
+.macro voting_end rbase:req, rcpu:req, rscratch:req
+ dmb
+ mov \rscratch, #0
+ strb \rscratch, [\rbase, \rcpu]
+ dsb
+ sev
+.endm
+
+/*
+ * The vlock structure must reside in Strongly-Ordered or Device memory.
+ * This implementation deliberately eliminates most of the barriers which
+ * would be required for other memory types, and assumes that independent
+ * writes to neighbouring locations within a cacheline do not interfere
+ * with one another.
+ */
+
+@ r0: lock structure base
+@ r1: CPU ID (0-based index within cluster)
+ENTRY(vlock_trylock)
+ add r1, r1, #VLOCK_VOTING_OFFSET
+
+ voting_begin r0, r1, r2
+
+ ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held
+ cmp r2, #VLOCK_OWNER_NONE
+ bne trylock_fail @ fail if so
+
+ @ Control dependency implies strb not observable before previous ldrb.
+
+ strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote
+
+ voting_end r0, r1, r2 @ implies DMB
+
+ @ Wait for the current round of voting to finish:
+
+ MANY( mov r3, #VLOCK_VOTING_OFFSET )
+0:
+ MANY( ldr r2, [r0, r3] )
+ FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] )
+ cmp r2, #0
+ wfene
+ bne 0b
+ MANY( add r3, r3, #4 )
+ MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE )
+ MANY( bne 0b )
+
+ @ Check who won:
+
+ dmb
+ ldrb r2, [r0, #VLOCK_OWNER_OFFSET]
+ eor r0, r1, r2 @ zero if I won, else nonzero
+ bx lr
+
+trylock_fail:
+ voting_end r0, r1, r2
+ mov r0, #1 @ nonzero indicates that I lost
+ bx lr
+ENDPROC(vlock_trylock)
+
+@ r0: lock structure base
+ENTRY(vlock_unlock)
+ dmb
+ mov r1, #VLOCK_OWNER_NONE
+ strb r1, [r0, #VLOCK_OWNER_OFFSET]
+ dsb
+ sev
+ bx lr
+ENDPROC(vlock_unlock)
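
For readers less comfortable with the assembly, here is a rough C rendering of
the same voting scheme. It is only an illustration: the real lock must run
with the MMU and caches off and relies on the exact DMB/DSB/SEV/WFE placement
shown above, so this sketch substitutes a generic full barrier and a busy-wait,
and all names are invented for the example.

   #define SKETCH_MAX_CPUS       4   /* stands in for MAX_CPUS_PER_CLUSTER */
   #define SKETCH_OWNER_NONE     0   /* VLOCK_OWNER_NONE */
   #define SKETCH_VOTING_OFFSET  4   /* VLOCK_VOTING_OFFSET */

   struct vlock_sketch {
       volatile unsigned char owner;                    /* 0 when free */
       volatile unsigned char voting[SKETCH_MAX_CPUS];  /* one flag per CPU */
   };

   /* Returns 0 if this CPU won the lock, nonzero otherwise. */
   static int vlock_trylock_sketch(struct vlock_sketch *v, unsigned int cpu)
   {
       unsigned int i;

       v->voting[cpu] = 1;                     /* voting_begin */
       __sync_synchronize();

       if (v->owner != SKETCH_OWNER_NONE) {    /* already held: back off */
           v->voting[cpu] = 0;                 /* voting_end */
           __sync_synchronize();
           return 1;
       }

       v->owner = cpu + SKETCH_VOTING_OFFSET;  /* submit my vote */
       v->voting[cpu] = 0;                     /* voting_end */
       __sync_synchronize();

       for (i = 0; i < SKETCH_MAX_CPUS; i++)   /* wait for this voting round */
           while (v->voting[i])
               ;                               /* the real code uses WFE here */

       __sync_synchronize();
       return v->owner != cpu + SKETCH_VOTING_OFFSET;  /* nonzero if I lost */
   }

   static void vlock_unlock_sketch(struct vlock_sketch *v)
   {
       __sync_synchronize();
       v->owner = SKETCH_OWNER_NONE;
   }

Note that the winning vote is recorded as cpu + VLOCK_VOTING_OFFSET rather
than the raw CPU number, which keeps the owner field distinct from
VLOCK_OWNER_NONE (zero) even when CPU 0 takes the lock.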
diff --git a/arch/arm/common/vlock.h b/arch/arm/common/vlock.h
new file mode 100644
index 00000000000..3b441475a59
--- /dev/null
+++ b/arch/arm/common/vlock.h
@@ -0,0 +1,29 @@
+/*
+ * vlock.h - simple voting lock implementation
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VLOCK_H
+#define __VLOCK_H
+
+#include <asm/mcpm.h>
+
+/* Offsets and sizes are rounded to a word (4 bytes) */
+#define VLOCK_OWNER_OFFSET 0
+#define VLOCK_VOTING_OFFSET 4
+#define VLOCK_VOTING_SIZE ((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
+#define VLOCK_SIZE (VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)
+#define VLOCK_OWNER_NONE 0
+
+#endif /* ! __VLOCK_H */
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index fbbc5bb022d..6a99e30f81d 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -116,6 +116,7 @@ CONFIG_SND_SOC=y
CONFIG_SND_MXS_SOC=y
CONFIG_SND_SOC_MXS_SGTL5000=y
CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_STORAGE=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b16bae2c9a6..bd07864f14a 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -126,6 +126,8 @@ CONFIG_INPUT_MISC=y
CONFIG_INPUT_TWL4030_PWRBUTTON=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd1..bff71388e72 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
flush_cache_all();
}
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path. It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush, i.e. an atomic clean+invalidate
+ * operation, is needed to avoid discarding possible concurrent writes to
+ * the accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located in
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6 /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area, but we use that name anyway to
+ * make the intent clear, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+ char *_p = (char *)p;
+
+ __cpuc_clean_dcache_area(_p, size);
+ outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU. We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+ char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+ if (outer_cache.flush_range) {
+ /*
+ * Ensure dirty data migrated from other CPUs into our cache
+ * are cleaned out safely before the outer cache is cleaned:
+ */
+ __cpuc_clean_dcache_area(_p, size);
+
+ /* Clean and invalidate stale data for *p from outer ... */
+ outer_flush_range(__pa(_p), __pa(_p + size));
+ }
+#endif
+
+ /* ... and inner cache: */
+ __cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
#endif
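
As a concrete illustration of the rules spelled out in the comment block above, a writer running with its cache on could publish a state variable to a reader that may run with its cache off roughly as follows. This is a sketch only, assuming the usual kernel environment provided by the header above; the variable and function names are invented for the example.

static int power_state __aligned(__CACHE_WRITEBACK_GRANULE);

static void publish_power_state(int new_state)	/* cached writer */
{
	power_state = new_state;
	sync_cache_w(&power_state);	/* clean the write out to main memory */
}

static int read_power_state(void)	/* value may be written by other CPUs */
{
	sync_cache_r(&power_state);	/* flush (not just invalidate) stale copies */
	return power_state;
}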
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 00000000000..0f7b7620e9a
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,209 @@
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for quite a while, while keeping the
+ * (assembly) code simpler. When this starts to grow, we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER 4
+#define MAX_NR_CLUSTERS 2
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate, via ptr, where the given CPU in the given
+ * cluster should branch once it is ready to re-enter the kernel, or NULL
+ * if it should be gated. A gated CPU is held in a WFE loop until its
+ * vector becomes non-NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset. If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context. However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return. Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU into a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ * to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended. The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return. Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work. This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+ int (*power_up)(unsigned int cpu, unsigned int cluster);
+ void (*power_down)(void);
+ void (*suspend)(u64);
+ void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+ /* individual CPU states */
+ struct {
+ s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+ } cpus[MAX_CPUS_PER_CLUSTER];
+
+ /* cluster state */
+ s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+ /* inbound-side state */
+ s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+ struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+extern unsigned long sync_phys; /* physical address of *mcpm_sync */
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files. Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN 0x11
+#define CPU_COMING_UP 0x12
+#define CPU_UP 0x13
+#define CPU_GOING_DOWN 0x14
+
+#define CLUSTER_DOWN 0x21
+#define CLUSTER_UP 0x22
+#define CLUSTER_GOING_DOWN 0x23
+
+#define INBOUND_NOT_COMING_UP 0x31
+#define INBOUND_COMING_UP 0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS 0
+#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+ (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+ (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+ (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
+#endif
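
A hedged sketch of how a platform backend might plug into this API follows. Only the mcpm_* symbols and struct mcpm_platform_ops come from the header above; the my_* functions are invented placeholders, error handling is minimal, and a real backend would also call mcpm_sync_init() with its power_up_setup helper and talk to a real power controller.

extern void secondary_startup(void);	/* usual ARM secondary entry point */

static int my_power_up(unsigned int cpu, unsigned int cluster)
{
	/* poke the power controller to release (cpu, cluster) from reset */
	return 0;
}

static void my_power_down(void)
{
	/* flush caches, leave coherency, then enter WFI / power off */
}

static void my_suspend(u64 expected_residency)
{
	/* pick a sleep state based on expected_residency */
	my_power_down();
}

static void my_powered_up(void)
{
	/* restore cluster-level state if we were the first CPU back up */
}

static const struct mcpm_platform_ops my_ops = {
	.power_up	= my_power_up,
	.power_down	= my_power_down,
	.suspend	= my_suspend,
	.powered_up	= my_powered_up,
};

static int __init my_mcpm_init(void)
{
	int ret = mcpm_platform_register(&my_ops);

	if (!ret)
		mcpm_smp_set_ops();	/* route SMP bring-up through MCPM */
	return ret;
}

/* Booting a secondary CPU then looks roughly like this: */
static int my_boot_secondary(unsigned int cpu, unsigned int cluster)
{
	mcpm_set_entry_vector(cpu, cluster, secondary_startup);
	return mcpm_cpu_power_up(cpu, cluster);
}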
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e7804f3..e3d55547e75 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,15 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
- u64 id;
+ atomic64_t id;
#endif
- unsigned int vmalloc_seq;
+ unsigned int vmalloc_seq;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id & ~ASID_MASK)
+#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
#else
#define ASID(mm) (0)
#endif
@@ -26,7 +26,7 @@ typedef struct {
* modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
*/
typedef struct {
- unsigned long end_brk;
+ unsigned long end_brk;
} mm_context_t;
#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644bc7cc..863a6611323 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm) ({ mm->context.id = 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
#else /* !CONFIG_CPU_HAS_ASID */
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3a77e..4db8c8820f0 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,10 +34,13 @@
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
+#define TLB_V6_BP (1 << 19)
+
/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE (1 << 19)
-#define TLB_V7_UIS_FULL (1 << 20)
-#define TLB_V7_UIS_ASID (1 << 21)
+#define TLB_V7_UIS_PAGE (1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP (1 << 23)
#define TLB_BARRIER (1 << 28)
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
@@ -150,7 +153,8 @@
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
- TLB_V6_I_ASID | TLB_V6_D_ASID)
+ TLB_V6_I_ASID | TLB_V6_D_ASID | \
+ TLB_V6_BP)
#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags v6wbi_tlb_flags
@@ -166,9 +170,11 @@
#endif
#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
- TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+ TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
- TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+ TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+ TLB_V6_U_ASID | TLB_V6_BP)
#ifdef CONFIG_CPU_TLB_V7
@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
}
}
+static inline void local_flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+ else if (tlb_flag(TLB_V6_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+
+ if (tlb_flag(TLB_BARRIER))
+ isb();
+}
+
/*
* flush_pmd_entry
*
@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
#define flush_tlb_kernel_page local_flush_tlb_kernel_page
#define flush_tlb_range local_flush_tlb_range
#define flush_tlb_kernel_range local_flush_tlb_kernel_range
+#define flush_bp_all local_flush_bp_all
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
#endif
/*
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 5c27696de14..8b1f37bfeee 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -2,6 +2,7 @@
#define _ASM_ARM_XEN_EVENTS_H
#include <asm/ptrace.h>
+#include <asm/atomic.h>
enum ipi_vector {
XEN_PLACEHOLDER_VECTOR,
@@ -15,26 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
-/*
- * We cannot use xchg because it does not support 8-byte
- * values. However it is safe to use {ldr,dtd}exd directly because all
- * platforms which Xen can run on support those instructions.
- */
-static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val)
-{
- xen_ulong_t oldval;
- unsigned int tmp;
-
- wmb();
- asm volatile("@ xchg_xen_ulong\n"
- "1: ldrexd %0, %H0, [%3]\n"
- " strexd %1, %2, %H2, [%3]\n"
- " teq %1, #0\n"
- " bne 1b"
- : "=&r" (oldval), "=&r" (tmp)
- : "r" (val), "r" (ptr)
- : "memory", "cc");
- return oldval;
-}
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+ atomic64_t, \
+ counter), (val))
#endif /* _ASM_ARM_XEN_EVENTS_H */
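
The replacement macro above leans on the fact that, on 32-bit ARM, a 64-bit xen_ulong_t has the same layout as the counter member of an atomic64_t, so container_of() can map a pointer to the value back to the enclosing atomic64_t and reuse atomic64_xchg(). A standalone sketch of that container_of() step (sketch_* names invented, plain C rather than kernel code, and deliberately non-atomic):

#include <stddef.h>	/* offsetof */

typedef struct { long long counter; } atomic64_sketch_t;

#define sketch_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static long long sketch_xchg(long long *ptr, long long val)
{
	atomic64_sketch_t *v =
		sketch_container_of(ptr, atomic64_sketch_t, counter);
	long long old;

	/* the kernel calls atomic64_xchg(v, val) here instead */
	old = v->counter;
	v->counter = val;
	return old;
}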
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 4da7cde70b5..af33b44990e 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -404,7 +404,7 @@
#define __NR_setns (__NR_SYSCALL_BASE+375)
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
- /* 378 for kcmp */
+#define __NR_kcmp (__NR_SYSCALL_BASE+378)
#define __NR_finit_module (__NR_SYSCALL_BASE+379)
/*
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 3c09d8c7798..a53efa99369 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -110,7 +110,7 @@ int main(void)
BLANK();
#endif
#ifdef CONFIG_CPU_HAS_ASID
- DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
+ DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
BLANK();
#endif
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
@@ -149,6 +149,10 @@ int main(void)
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
+ BLANK();
+ DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
+ DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
+ BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 0cc57611fc4..c6ca7e37677 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,7 +387,7 @@
/* 375 */ CALL(sys_setns)
CALL(sys_process_vm_readv)
CALL(sys_process_vm_writev)
- CALL(sys_ni_syscall) /* reserved for sys_kcmp */
+ CALL(sys_kcmp)
CALL(sys_finit_module)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 486a15ae901..e0eb9a1cae7 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -184,13 +184,22 @@ __create_page_tables:
orr r3, r3, #3 @ PGD block type
mov r6, #4 @ PTRS_PER_PGD
mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
-1: str r3, [r0], #4 @ set bottom PGD entry bits
+1:
+#ifdef CONFIG_CPU_ENDIAN_BE8
str r7, [r0], #4 @ set top PGD entry bits
+ str r3, [r0], #4 @ set bottom PGD entry bits
+#else
+ str r3, [r0], #4 @ set bottom PGD entry bits
+ str r7, [r0], #4 @ set top PGD entry bits
+#endif
add r3, r3, #0x1000 @ next PMD table
subs r6, r6, #1
bne 1b
add r4, r4, #0x1000 @ point to the PMD tables
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ add r4, r4, #4 @ we only write the bottom word
+#endif
#endif
ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
@@ -258,6 +267,11 @@ __create_page_tables:
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+ sub r4, r4, #4 @ Fixup page table pointer
+ @ for 64-bit descriptors
+#endif
+
#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
/*
@@ -276,13 +290,17 @@ __create_page_tables:
orr r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
mov r7, #1 << (54 - 32) @ XN
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ str r7, [r0], #4
+ str r3, [r0], #4
#else
- orr r3, r3, #PMD_SECT_XN
-#endif
str r3, [r0], #4
-#ifdef CONFIG_ARM_LPAE
str r7, [r0], #4
#endif
+#else
+ orr r3, r3, #PMD_SECT_XN
+ str r3, [r0], #4
+#endif
#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
/* we don't need any serial debugging mappings */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5eae53e7a2e..96093b75ab9 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1023,7 +1023,7 @@ out_mdbgen:
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
unsigned long action, void *cpu)
{
- if (action == CPU_ONLINE)
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
return NOTIFY_OK;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 31e0eb353cd..146157dfe27 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -400,7 +400,7 @@ __hw_perf_event_init(struct perf_event *event)
}
if (event->group_leader != event) {
- if (validate_group(event) != 0);
+ if (validate_group(event) != 0)
return -EINVAL;
}
@@ -484,7 +484,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};
-static void __init armpmu_init(struct arm_pmu *armpmu)
+static void armpmu_init(struct arm_pmu *armpmu)
{
atomic_set(&armpmu->active_events, 0);
mutex_init(&armpmu->reserve_mutex);
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8c79a9e70b8..039cffb053a 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
+#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
/*
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1bdfd87c8e4..79078edbb9b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* switch away from it before attempting any exclusive accesses.
*/
cpu_switch_mm(mm->pgd, mm);
+ local_flush_bp_all();
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
@@ -479,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
evt->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DUMMY;
- evt->rating = 400;
+ evt->rating = 100;
evt->mult = 1;
evt->set_mode = broadcast_timer_set_mode;
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 02c5d2ce23b..bd030053139 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -64,6 +64,11 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
+static inline void ipi_flush_bp_all(void *ignored)
+{
+ local_flush_bp_all();
+}
+
void flush_tlb_all(void)
{
if (tlb_ops_need_broadcast())
@@ -127,3 +132,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
local_flush_tlb_kernel_range(start, end);
}
+void flush_bp_all(void)
+{
+ if (tlb_ops_need_broadcast())
+ on_each_cpu(ipi_flush_bp_all, NULL, 1);
+ else
+ local_flush_bp_all();
+}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index c092115d903..3f256503748 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>
@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
struct device_node *np;
int err;
+ if (!is_smp() || !setup_max_cpus)
+ return;
+
np = of_find_matching_node(NULL, twd_of_match);
if (!np)
return;
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 358bca3a995..c59c97ea826 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
ret = __cpu_suspend(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
+ local_flush_bp_all();
local_flush_tlb_all();
}
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 650d5923ab8..94b0650ea98 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,27 +14,15 @@
.text
.align 5
- .word 0
-
-1: subs r2, r2, #4 @ 1 do we have enough
- blt 5f @ 1 bytes to align with?
- cmp r3, #2 @ 1
- strltb r1, [r0], #1 @ 1
- strleb r1, [r0], #1 @ 1
- strb r1, [r0], #1 @ 1
- add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memset again.
- */
ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
- bne 1b @ 1
+ mov ip, r0 @ preserve r0 as return value
+ bne 6f @ 1
/*
- * we know that the pointer in r0 is aligned to a word boundary.
+ * we know that the pointer in ip is aligned to a word boundary.
*/
- orr r1, r1, r1, lsl #8
+1: orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
mov r3, r1
cmp r2, #16
@@ -43,29 +31,28 @@ ENTRY(memset)
#if ! CALGN(1)+0
/*
- * We need an extra register for this loop - save the return address and
- * use the LR
+ * We need 2 extra registers for this loop - use r8 and the LR
*/
- str lr, [sp, #-4]!
- mov ip, r1
+ stmfd sp!, {r8, lr}
+ mov r8, r1
mov lr, r1
2: subs r2, r2, #64
- stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
- stmgeia r0!, {r1, r3, ip, lr}
- stmgeia r0!, {r1, r3, ip, lr}
- stmgeia r0!, {r1, r3, ip, lr}
+ stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
+ stmgeia ip!, {r1, r3, r8, lr}
+ stmgeia ip!, {r1, r3, r8, lr}
+ stmgeia ip!, {r1, r3, r8, lr}
bgt 2b
- ldmeqfd sp!, {pc} @ Now <64 bytes to go.
+ ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
- stmneia r0!, {r1, r3, ip, lr}
- stmneia r0!, {r1, r3, ip, lr}
+ stmneia ip!, {r1, r3, r8, lr}
+ stmneia ip!, {r1, r3, r8, lr}
tst r2, #16
- stmneia r0!, {r1, r3, ip, lr}
- ldr lr, [sp], #4
+ stmneia ip!, {r1, r3, r8, lr}
+ ldmfd sp!, {r8, lr}
#else
@@ -74,54 +61,63 @@ ENTRY(memset)
* whole cache lines at once.
*/
- stmfd sp!, {r4-r7, lr}
+ stmfd sp!, {r4-r8, lr}
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
- mov ip, r1
+ mov r8, r1
mov lr, r1
cmp r2, #96
- tstgt r0, #31
+ tstgt ip, #31
ble 3f
- and ip, r0, #31
- rsb ip, ip, #32
- sub r2, r2, ip
- movs ip, ip, lsl #(32 - 4)
- stmcsia r0!, {r4, r5, r6, r7}
- stmmiia r0!, {r4, r5}
- tst ip, #(1 << 30)
- mov ip, r1
- strne r1, [r0], #4
+ and r8, ip, #31
+ rsb r8, r8, #32
+ sub r2, r2, r8
+ movs r8, r8, lsl #(32 - 4)
+ stmcsia ip!, {r4, r5, r6, r7}
+ stmmiia ip!, {r4, r5}
+ tst r8, #(1 << 30)
+ mov r8, r1
+ strne r1, [ip], #4
3: subs r2, r2, #64
- stmgeia r0!, {r1, r3-r7, ip, lr}
- stmgeia r0!, {r1, r3-r7, ip, lr}
+ stmgeia ip!, {r1, r3-r8, lr}
+ stmgeia ip!, {r1, r3-r8, lr}
bgt 3b
- ldmeqfd sp!, {r4-r7, pc}
+ ldmeqfd sp!, {r4-r8, pc}
tst r2, #32
- stmneia r0!, {r1, r3-r7, ip, lr}
+ stmneia ip!, {r1, r3-r8, lr}
tst r2, #16
- stmneia r0!, {r4-r7}
- ldmfd sp!, {r4-r7, lr}
+ stmneia ip!, {r4-r7}
+ ldmfd sp!, {r4-r8, lr}
#endif
4: tst r2, #8
- stmneia r0!, {r1, r3}
+ stmneia ip!, {r1, r3}
tst r2, #4
- strne r1, [r0], #4
+ strne r1, [ip], #4
/*
* When we get here, we've got less than 4 bytes to zero. We
* may have an unaligned pointer as well.
*/
5: tst r2, #2
- strneb r1, [r0], #1
- strneb r1, [r0], #1
+ strneb r1, [ip], #1
+ strneb r1, [ip], #1
tst r2, #1
- strneb r1, [r0], #1
+ strneb r1, [ip], #1
mov pc, lr
+
+6: subs r2, r2, #4 @ 1 do we have enough
+ blt 5b @ 1 bytes to align with?
+ cmp r3, #2 @ 1
+ strltb r1, [ip], #1 @ 1
+ strleb r1, [ip], #1 @ 1
+ strb r1, [ip], #1 @ 1
+ add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
+ b 1b
ENDPROC(memset)
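
The point of the rewrite above is visible from C: memset() must hand back its first argument, and the old code used r0 as its running destination pointer, clobbering the return value. A trivial userspace check of that property (illustrative only):

#include <assert.h>
#include <string.h>

int main(void)
{
	char buf[64];

	/* C guarantees memset() returns its first argument unchanged. */
	assert(memset(buf, 0, sizeof(buf)) == buf);
	return 0;
}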
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index 2ea7059b840..c20a870ea9c 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -176,6 +176,7 @@ static struct w1_gpio_platform_data w1_gpio_pdata = {
/* If you choose to use a pin other than PB16 it needs to be 3.3V */
.pin = AT91_PIN_PB16,
.is_open_drain = 1,
+ .ext_pullup_enable_pin = -EINVAL,
};
static struct platform_device w1_device = {
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index a033b8df9fb..869cbecf00b 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -188,6 +188,7 @@ static struct spi_board_info portuxg20_spi_devices[] = {
static struct w1_gpio_platform_data w1_gpio_pdata = {
.pin = AT91_PIN_PA29,
.is_open_drain = 1,
+ .ext_pullup_enable_pin = -EINVAL,
};
static struct platform_device w1_device = {
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 7b025ee528a..2f9ff93a4e6 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -172,7 +172,7 @@ static struct clk *clk[clk_max];
static struct clk_onecell_data clk_data;
static enum mx6q_clks const clks_init_on[] __initconst = {
- mmdc_ch0_axi, rom,
+ mmdc_ch0_axi, rom, pll1_sys,
};
static struct clk_div_table clk_enet_ref_table[] = {
diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S
index 921fc155585..a58c8b0527c 100644
--- a/arch/arm/mach-imx/headsmp.S
+++ b/arch/arm/mach-imx/headsmp.S
@@ -26,16 +26,16 @@ ENDPROC(v7_secondary_startup)
#ifdef CONFIG_PM
/*
- * The following code is located into the .data section. This is to
- * allow phys_l2x0_saved_regs to be accessed with a relative load
- * as we are running on physical address here.
+ * The following code must assume it is running from a physical address,
+ * where absolute virtual addresses of the data section have to be
+ * turned into relative ones.
*/
- .data
- .align
#ifdef CONFIG_CACHE_L2X0
.macro pl310_resume
- ldr r2, phys_l2x0_saved_regs
+ adr r0, l2x0_saved_regs_offset
+ ldr r2, [r0]
+ add r2, r2, r0
ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
@@ -43,9 +43,9 @@ ENDPROC(v7_secondary_startup)
str r1, [r0, #L2X0_CTRL] @ re-enable L2
.endm
- .globl phys_l2x0_saved_regs
-phys_l2x0_saved_regs:
- .long 0
+l2x0_saved_regs_offset:
+ .word l2x0_saved_regs - .
+
#else
.macro pl310_resume
.endm
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index ee42d20cba1..5faba7a3c95 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -22,8 +22,6 @@
#include "common.h"
#include "hardware.h"
-extern unsigned long phys_l2x0_saved_regs;
-
static int imx6q_suspend_finish(unsigned long val)
{
cpu_do_idle();
@@ -57,18 +55,5 @@ static const struct platform_suspend_ops imx6q_pm_ops = {
void __init imx6q_pm_init(void)
{
- /*
- * The l2x0 core code provides an infrastucture to save and restore
- * l2x0 registers across suspend/resume cycle. But because imx6q
- * retains L2 content during suspend and needs to resume L2 before
- * MMU is enabled, it can only utilize register saving support and
- * have to take care of restoring on its own. So we save physical
- * address of the data structure used by l2x0 core to save registers,
- * and later restore the necessary ones in imx6q resume entry.
- */
-#ifdef CONFIG_CACHE_L2X0
- phys_l2x0_saved_regs = __pa(&l2x0_saved_regs);
-#endif
-
suspend_set_ops(&imx6q_pm_ops);
}
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index d42730a1d4a..d599e354ca5 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -163,6 +163,7 @@ static struct platform_device vulcan_max6369 = {
static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = {
.pin = 14,
+ .ext_pullup_enable_pin = -EINVAL,
};
static struct platform_device vulcan_w1_gpio = {
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 2e73e9d53f7..d367aa6b47b 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -41,16 +41,12 @@ static void __init kirkwood_legacy_clk_init(void)
struct device_node *np = of_find_compatible_node(
NULL, NULL, "marvell,kirkwood-gating-clock");
-
struct of_phandle_args clkspec;
+ struct clk *clk;
clkspec.np = np;
clkspec.args_count = 1;
- clkspec.args[0] = CGC_BIT_GE0;
- orion_clkdev_add(NULL, "mv643xx_eth_port.0",
- of_clk_get_from_provider(&clkspec));
-
clkspec.args[0] = CGC_BIT_PEX0;
orion_clkdev_add("0", "pcie",
of_clk_get_from_provider(&clkspec));
@@ -59,9 +55,24 @@ static void __init kirkwood_legacy_clk_init(void)
orion_clkdev_add("1", "pcie",
of_clk_get_from_provider(&clkspec));
- clkspec.args[0] = CGC_BIT_GE1;
- orion_clkdev_add(NULL, "mv643xx_eth_port.1",
+ clkspec.args[0] = CGC_BIT_SDIO;
+ orion_clkdev_add(NULL, "mvsdio",
of_clk_get_from_provider(&clkspec));
+
+ /*
+ * The ethernet interfaces forget the MAC address assigned by
+ * u-boot if the clocks are turned off. Until proper DT support
+ * is available, we always enable them for now.
+ */
+ clkspec.args[0] = CGC_BIT_GE0;
+ clk = of_clk_get_from_provider(&clkspec);
+ orion_clkdev_add(NULL, "mv643xx_eth_port.0", clk);
+ clk_prepare_enable(clk);
+
+ clkspec.args[0] = CGC_BIT_GE1;
+ clk = of_clk_get_from_provider(&clkspec);
+ orion_clkdev_add(NULL, "mv643xx_eth_port.1", clk);
+ clk_prepare_enable(clk);
}
static void __init kirkwood_of_clk_init(void)
diff --git a/arch/arm/mach-mxs/icoll.c b/arch/arm/mach-mxs/icoll.c
index 8fb23af154b..e26eeba4659 100644
--- a/arch/arm/mach-mxs/icoll.c
+++ b/arch/arm/mach-mxs/icoll.c
@@ -100,7 +100,7 @@ static struct irq_domain_ops icoll_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-void __init icoll_of_init(struct device_node *np,
+static void __init icoll_of_init(struct device_node *np,
struct device_node *interrupt_parent)
{
/*
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 05218671334..3218f1f2c0e 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -402,17 +402,17 @@ static void __init cfa10049_init(void)
{
enable_clk_enet_out();
update_fec_mac_prop(OUI_CRYSTALFONTZ);
+
+ mxsfb_pdata.mode_list = cfa10049_video_modes;
+ mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
+ mxsfb_pdata.default_bpp = 32;
+ mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
}
static void __init cfa10037_init(void)
{
enable_clk_enet_out();
update_fec_mac_prop(OUI_CRYSTALFONTZ);
-
- mxsfb_pdata.mode_list = cfa10049_video_modes;
- mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
- mxsfb_pdata.default_bpp = 32;
- mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
}
static void __init apf28_init(void)
diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c
index a4294aa9f30..e63b7d87acb 100644
--- a/arch/arm/mach-mxs/mm.c
+++ b/arch/arm/mach-mxs/mm.c
@@ -18,6 +18,7 @@
#include <mach/mx23.h>
#include <mach/mx28.h>
+#include <mach/common.h>
/*
* Define the MX23 memory map.
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c
index 54add60f94c..1dff4670375 100644
--- a/arch/arm/mach-mxs/ocotp.c
+++ b/arch/arm/mach-mxs/ocotp.c
@@ -19,6 +19,7 @@
#include <asm/processor.h> /* for cpu_relax() */
#include <mach/mxs.h>
+#include <mach/common.h>
#define OCOTP_WORD_OFFSET 0x20
#define OCOTP_WORD_COUNT 0x20
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 27c2cb7ab81..1504b68f4c6 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
{
int irq;
- vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
+ vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
irq_set_chip_and_handler(irq, &netx_hif_chip,
diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h
index 6ce914d54a3..8f74a844a77 100644
--- a/arch/arm/mach-netx/include/mach/irqs.h
+++ b/arch/arm/mach-netx/include/mach/irqs.h
@@ -17,42 +17,42 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define NETX_IRQ_VIC_START 0
-#define NETX_IRQ_SOFTINT 0
-#define NETX_IRQ_TIMER0 1
-#define NETX_IRQ_TIMER1 2
-#define NETX_IRQ_TIMER2 3
-#define NETX_IRQ_SYSTIME_NS 4
-#define NETX_IRQ_SYSTIME_S 5
-#define NETX_IRQ_GPIO_15 6
-#define NETX_IRQ_WATCHDOG 7
-#define NETX_IRQ_UART0 8
-#define NETX_IRQ_UART1 9
-#define NETX_IRQ_UART2 10
-#define NETX_IRQ_USB 11
-#define NETX_IRQ_SPI 12
-#define NETX_IRQ_I2C 13
-#define NETX_IRQ_LCD 14
-#define NETX_IRQ_HIF 15
-#define NETX_IRQ_GPIO_0_14 16
-#define NETX_IRQ_XPEC0 17
-#define NETX_IRQ_XPEC1 18
-#define NETX_IRQ_XPEC2 19
-#define NETX_IRQ_XPEC3 20
-#define NETX_IRQ_XPEC(no) (17 + (no))
-#define NETX_IRQ_MSYNC0 21
-#define NETX_IRQ_MSYNC1 22
-#define NETX_IRQ_MSYNC2 23
-#define NETX_IRQ_MSYNC3 24
-#define NETX_IRQ_IRQ_PHY 25
-#define NETX_IRQ_ISO_AREA 26
+#define NETX_IRQ_VIC_START 64
+#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0)
+#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1)
+#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2)
+#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3)
+#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4)
+#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5)
+#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6)
+#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7)
+#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8)
+#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9)
+#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10)
+#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11)
+#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12)
+#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13)
+#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14)
+#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15)
+#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16)
+#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17)
+#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18)
+#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19)
+#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20)
+#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no))
+#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21)
+#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22)
+#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23)
+#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24)
+#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25)
+#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26)
/* int 27 is reserved */
/* int 28 is reserved */
-#define NETX_IRQ_TIMER3 29
-#define NETX_IRQ_TIMER4 30
+#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29)
+#define NETX_IRQ_TIMER4 (NETX_IRQ_VIC_START + 30)
/* int 31 is reserved */
-#define NETX_IRQS 32
+#define NETX_IRQS (NETX_IRQ_VIC_START + 32)
/* for multiplexed irqs on gpio 0..14 */
#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index fb18831e88a..14f7e992047 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -31,6 +31,8 @@
#include <plat/i2c.h>
+#include <mach/irqs.h>
+
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
void omap7xx_map_io(void);
#else
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 49ac3dfebef..8111cd9ff3e 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -311,9 +311,6 @@ config MACH_OMAP_ZOOM2
default y
select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
- select SERIAL_8250
- select SERIAL_8250_CONSOLE
- select SERIAL_CORE_CONSOLE
config MACH_OMAP_ZOOM3
bool "OMAP3630 Zoom3 board"
@@ -321,9 +318,6 @@ config MACH_OMAP_ZOOM3
default y
select OMAP_PACKAGE_CBP
select REGULATOR_FIXED_VOLTAGE if REGULATOR
- select SERIAL_8250
- select SERIAL_8250_CONSOLE
- select SERIAL_CORE_CONSOLE
config MACH_CM_T35
bool "CompuLab CM-T35/CM-T3730 modules"
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 0274ff7a2a2..e54a4806019 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -102,6 +102,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
.init_irq = omap_intc_of_init,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
.init_time = omap3_sync32k_timer_init,
.dt_compat = omap3_boards_compat,
.restart = omap3xxx_restart,
@@ -119,6 +120,7 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
.init_irq = omap_intc_of_init,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
.init_time = omap3_secure_sync32k_timer_init,
.dt_compat = omap3_gp_boards_compat,
.restart = omap3xxx_restart,
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index f7c4616cbb6..d2ea68ea678 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/leds.h>
+#include <linux/usb/phy.h>
#include <linux/usb/musb.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
@@ -98,6 +99,7 @@ static void __init rx51_init(void)
sdrc_params = nokia_get_sdram_timings();
omap_sdrc_init(sdrc_params, sdrc_params);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(&musb_board_data);
rx51_peripherals_init();
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 0a6b9c7a63d..40f4a03d728 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -108,7 +108,6 @@ void omap35xx_init_late(void);
void omap3630_init_late(void);
void am35xx_init_late(void);
void ti81xx_init_late(void);
-void omap4430_init_late(void);
int omap2_common_pm_late_init(void);
#if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430)
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index e4b16c8efe8..410e1bac781 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1122,9 +1122,6 @@ int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
/* TODO: remove, see function definition */
gpmc_convert_ps_to_ns(gpmc_t);
- /* Now the GPMC is initialised, unreserve the chip-selects */
- gpmc_cs_map = 0;
-
return 0;
}
@@ -1383,6 +1380,9 @@ static int gpmc_probe(struct platform_device *pdev)
if (IS_ERR_VALUE(gpmc_setup_irq()))
dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
+ /* Now the GPMC is initialised, unreserve the chip-selects */
+ gpmc_cs_map = 0;
+
rc = gpmc_probe_dt(pdev);
if (rc < 0) {
clk_disable_unprepare(gpmc_l3_clk);
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 6a217c98db5..f82cf878d6a 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -211,8 +211,6 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
return -EINVAL;
}
- pr_err("%s: Could not find signal %s\n", __func__, muxname);
-
return -ENODEV;
}
@@ -234,6 +232,8 @@ int __init omap_mux_get_by_name(const char *muxname,
return mux_mode;
}
+ pr_err("%s: Could not find signal %s\n", __func__, muxname);
+
return -ENODEV;
}
@@ -739,8 +739,9 @@ static void __init omap_mux_dbg_create_entry(
list_for_each_entry(e, &partition->muxmodes, node) {
struct omap_mux *m = &e->mux;
- (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
- m, &omap_mux_dbg_signal_fops);
+ (void)debugfs_create_file(m->muxnames[0], S_IWUSR | S_IRUGO,
+ mux_dbg_dir, m,
+ &omap_mux_dbg_signal_fops);
}
}
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index af41888acbd..969b0ba7fa7 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -505,6 +505,7 @@ static struct w1_gpio_platform_data w1_gpio_platform_data = {
.pin = GPIO_ONE_WIRE,
.is_open_drain = 0,
.enable_external_pullup = w1_enable_external_pullup,
+ .ext_pullup_enable_pin = -EINVAL,
};
struct platform_device raumfeld_w1_gpio_device = {
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index f9d754f90c5..d2b3937c401 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -14,7 +14,7 @@
#define pr_fmt(fmt) "SPEAr3xx: " fmt
#include <linux/amba/pl022.h>
-#include <linux/amba/pl08x.h>
+#include <linux/amba/pl080.h>
#include <linux/io.h>
#include <plat/pl080.h>
#include <mach/generic.h>
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6..a5a4b2bc42b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
return 0;
}
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
- u64 asid = mm->context.id;
+ u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
cpumask_clear(mm_cpumask(mm));
}
- mm->context.id = asid;
+ return asid;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
+ u64 asid;
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
@@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
*/
cpu_set_reserved_ttbr0();
- if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+ asid = atomic64_read(&mm->context.id);
+ if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
- if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- new_context(mm, cpu);
-
- atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
- cpumask_set_cpu(cpu, mm_cpumask(mm));
+ asid = atomic64_read(&mm->context.id);
+ if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+ local_flush_bp_all();
local_flush_tlb_all();
+ }
+
+ atomic64_set(&per_cpu(active_asids, cpu), asid);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c7e3759f16d..e9db6b4bf65 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -342,6 +342,7 @@ static int __init atomic_pool_init(void)
{
struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ gfp_t gfp = GFP_KERNEL | GFP_DMA;
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
@@ -361,8 +362,8 @@ static int __init atomic_pool_init(void)
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
atomic_pool_init);
else
- ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
- &page, atomic_pool_init);
+ ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
+ atomic_pool_init);
if (ptr) {
int i;
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc4..5ee505c937d 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
{
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
+ local_flush_bp_all();
#ifdef CONFIG_CPU_HAS_ASID
/*
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 50bf1dafc9e..6ffd78c0f9a 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -48,7 +48,7 @@
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
- and r3, r1, #0xff
+ asid r3, r1
mov r3, r3, lsl #(48 - 32) @ ASID
mcrr p15, 0, r0, r3, c2 @ set TTB 0
isb
diff --git a/arch/arm/plat-orion/addr-map.c b/arch/arm/plat-orion/addr-map.c
index febe3862873..807ac8e5cbc 100644
--- a/arch/arm/plat-orion/addr-map.c
+++ b/arch/arm/plat-orion/addr-map.c
@@ -157,9 +157,12 @@ void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg,
u32 size = readl(ddr_window_cpu_base + DDR_SIZE_CS_OFF(i));
/*
- * Chip select enabled?
+ * We only take care of entries for which the chip
+ * select is enabled, and that don't have high base
+ * address bits set (devices can only address the
+ * first 4 GB of memory with their 32-bit addresses).
*/
- if (size & 1) {
+ if ((size & 1) && !(base & 0xF)) {
struct mbus_dram_window *w;
w = &orion_mbus_dram_info.cs[cs++];
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
index 739d016eb27..8a08c31b5e2 100644
--- a/arch/arm/plat-spear/Kconfig
+++ b/arch/arm/plat-spear/Kconfig
@@ -10,7 +10,7 @@ choice
config ARCH_SPEAR13XX
bool "ST SPEAr13xx with Device Tree"
- select ARCH_HAVE_CPUFREQ
+ select ARCH_HAS_CPUFREQ
select ARM_GIC
select CPU_V7
select GPIO_SPEAR_SPICS
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 9b89257b2cf..c1a868d398b 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -7,7 +7,7 @@ config AVR32
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_ATOMIC64
select HARDIRQS_SW_RESEND
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 600494c70e9..c3f2e0bc644 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -33,7 +33,7 @@ config BLACKFIN
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index bb0ac66cf53..06dd026533e 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -43,7 +43,7 @@ config CRIS
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 12369b194c7..2ce731f9aa4 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -6,7 +6,7 @@ config FRV
select HAVE_PERF_EVENTS
select HAVE_UID16
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_SHOW
select HAVE_DEBUG_BUGVERBOSE
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index ae8551eb373..79250de1b12 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -5,7 +5,7 @@ config H8300
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 33f3fdc0b21..9a02f71c6b1 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,7 +26,7 @@ config IA64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 433f5e8a2cd..2eda28414ab 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -619,6 +619,7 @@ static struct file_system_type pfm_fs_type = {
.mount = pfmfs_mount,
.kill_sb = kill_anon_super,
};
+MODULE_ALIAS_FS("pfmfs");
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 92623818a1f..bcd17b20657 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,7 +10,7 @@ config M32R
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_DEBUG_BUGVERBOSE
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
diff --git a/arch/m32r/include/uapi/asm/stat.h b/arch/m32r/include/uapi/asm/stat.h
index da4518f82d6..98470fe483b 100644
--- a/arch/m32r/include/uapi/asm/stat.h
+++ b/arch/m32r/include/uapi/asm/stat.h
@@ -63,10 +63,10 @@ struct stat64 {
long long st_size;
unsigned long st_blksize;
-#if defined(__BIG_ENDIAN)
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
unsigned long __pad4; /* future possible st_blocks high bits */
unsigned long st_blocks; /* Number 512-byte blocks allocated. */
-#elif defined(__LITTLE_ENDIAN)
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
unsigned long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long __pad4; /* future possible st_blocks high bits */
#else
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 0e708c78e01..6de813370b8 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -8,7 +8,7 @@ config M68K
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER if MMU
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 7cdf6b01038..7240584d343 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -310,7 +310,6 @@ config COBRA5282
config SOM5282EM
bool "EMAC.Inc SOM5282EM board support"
depends on M528x
- select EMAC_INC
help
Support for the EMAC.Inc SOM5282EM module.
diff --git a/arch/m68k/include/asm/MC68328.h b/arch/m68k/include/asm/MC68328.h
index a337e56d09b..4ebf098b8a1 100644
--- a/arch/m68k/include/asm/MC68328.h
+++ b/arch/m68k/include/asm/MC68328.h
@@ -293,7 +293,7 @@
/*
* Here go the bitmasks themselves
*/
-#define IMR_MSPIM (1 << SPIM _IRQ_NUM) /* Mask SPI Master interrupt */
+#define IMR_MSPIM (1 << SPIM_IRQ_NUM) /* Mask SPI Master interrupt */
#define IMR_MTMR2 (1 << TMR2_IRQ_NUM) /* Mask Timer 2 interrupt */
#define IMR_MUART (1 << UART_IRQ_NUM) /* Mask UART interrupt */
#define IMR_MWDT (1 << WDT_IRQ_NUM) /* Mask Watchdog Timer interrupt */
@@ -327,7 +327,7 @@
#define IWR_ADDR 0xfffff308
#define IWR LONG_REF(IWR_ADDR)
-#define IWR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */
+#define IWR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
#define IWR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
#define IWR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
#define IWR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -357,7 +357,7 @@
#define ISR_ADDR 0xfffff30c
#define ISR LONG_REF(ISR_ADDR)
-#define ISR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */
+#define ISR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
#define ISR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
#define ISR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
#define ISR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -391,7 +391,7 @@
#define IPR_ADDR 0xfffff310
#define IPR LONG_REF(IPR_ADDR)
-#define IPR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */
+#define IPR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
#define IPR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
#define IPR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
#define IPR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -757,7 +757,7 @@
/* 'EZ328-compatible definitions */
#define TCN_ADDR TCN1_ADDR
-#define TCN TCN
+#define TCN TCN1
/*
* Timer Unit 1 and 2 Status Registers
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 71fb29938db..911ba472e6c 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -57,6 +57,9 @@ void (*mach_reset)(void);
void (*mach_halt)(void);
void (*mach_power_off)(void);
+#ifdef CONFIG_M68000
+#define CPU_NAME "MC68000"
+#endif
#ifdef CONFIG_M68328
#define CPU_NAME "MC68328"
#endif
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index afd8106fd83..519aad8fa81 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -188,7 +188,7 @@ void __init mem_init(void)
}
}
-#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
diff --git a/arch/m68k/platform/coldfire/m528x.c b/arch/m68k/platform/coldfire/m528x.c
index 83b7dad7a84..b03a9d27183 100644
--- a/arch/m68k/platform/coldfire/m528x.c
+++ b/arch/m68k/platform/coldfire/m528x.c
@@ -69,7 +69,7 @@ static void __init m528x_uarts_init(void)
u8 port;
/* make sure PUAPAR is set for UART0 and UART1 */
- port = readb(MCF5282_GPIO_PUAPAR);
+ port = readb(MCFGPIO_PUAPAR);
port |= 0x03 | (0x03 << 2);
writeb(port, MCFGPIO_PUAPAR);
}
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h
index d63b9d0e57d..d2baf696179 100644
--- a/arch/metag/include/asm/elf.h
+++ b/arch/metag/include/asm/elf.h
@@ -100,9 +100,6 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#define STACK_RND_MASK (0)
#ifdef CONFIG_METAG_USER_TCM
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
index cd7f2f2ad41..975f2f4e3ec 100644
--- a/arch/metag/mm/Kconfig
+++ b/arch/metag/mm/Kconfig
@@ -40,6 +40,7 @@ endchoice
config NUMA
bool "Non Uniform Memory Access (NUMA) Support"
+ select ARCH_WANT_NUMA_VARIABLE_LOCALITY
help
Some Meta systems have MMU-mappable on-chip memories with
lower latencies than main memory. This enables support for
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 7843d11156e..1323fa2530e 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -19,7 +19,7 @@ config MICROBLAZE
select HAVE_DEBUG_KMEMLEAK
select IRQ_DOMAIN
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ae9c716c46b..cd2e21ff562 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -38,7 +38,7 @@ config MIPS
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select CLONE_BACKWARDS
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index b06c7360b1c..428da175d07 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,7 +8,7 @@ config MN10300
select HAVE_ARCH_KGDB
select GENERIC_ATOMIC64
select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 014a6482ed4..9ab3bf2eca8 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -9,10 +9,9 @@ config OPENRISC
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
select HAVE_MEMBLOCK
- select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_REQUIRE_GPIOLIB
select HAVE_ARCH_TRACEHOOK
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index a9ff712a286..0339181bf3a 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -21,7 +21,7 @@ config PARISC
select GENERIC_STRNCPY_FROM_USER
select SYSCTL_ARCH_UNALIGN_ALLOW
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
select TTY # Needed for pdc_cons.c
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b89d7eb730a..80821512e9c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -98,7 +98,7 @@ config PPC
select HAVE_FUNCTION_GRAPH_TRACER
select SYSCTL_EXCEPTION_TRACE
select ARCH_WANT_OPTIONAL_GPIOLIB
- select HAVE_VIRT_TO_BUS if !PPC64
+ select VIRT_TO_BUS if !PPC64
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index a5f8264d2d3..125e1652006 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -113,7 +113,7 @@
STEPUP4((t)+16, fn)
_GLOBAL(powerpc_sha_transform)
- PPC_STLU r1,-STACKFRAMESIZE(r1)
+ PPC_STLU r1,-INT_FRAME_SIZE(r1)
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
@@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
- addi r1,r1,STACKFRAMESIZE
+ addi r1,r1,INT_FRAME_SIZE
blr
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index ef918a2328b..08bd299c75b 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -52,8 +52,6 @@
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-
/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix, postfix) \
static __inline__ void fn(unsigned long mask, \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e6658612203..c9c67fc888c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -266,7 +266,8 @@
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
-#define FSCR_TAR (1<<8) /* Enable Target Adress Register */
+#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
+#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
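
The FSCR bit macros above are written in the PowerPC MSB-0 convention also used for LPCR_VPM0, where bit 0 is the most significant bit of the 64-bit register, so 1 << (63-55) is the same value as the removed 1 << 8 spelling and the new FSCR_DSCR bit is 1 << 2. A small standalone check of that arithmetic (userspace C; the PPC_BIT helper is defined locally for illustration):

    #include <assert.h>
    #include <stdio.h>

    /* MSB-0 ("IBM") bit n of a 64-bit register, the convention the FSCR and
     * LPCR macros above are written in. */
    #define PPC_BIT(n)  (1ULL << (63 - (n)))

    int main(void)
    {
        /* FSCR_TAR: IBM bit 55 is conventional bit 8, i.e. the old 1<<8. */
        assert(PPC_BIT(55) == (1ULL << 8));
        /* FSCR_DSCR: IBM bit 61 is conventional bit 2. */
        assert(PPC_BIT(61) == (1ULL << 2));
        printf("FSCR_TAR=0x%llx FSCR_DSCR=0x%llx\n",
               (unsigned long long)PPC_BIT(55),
               (unsigned long long)PPC_BIT(61));
        return 0;
    }
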
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 535b6d8a41c..ebbec52d21b 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
+SYSCALL(ni_syscall) /* sys_kcmp */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f25b5c45c43..1487f0f1229 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 354
+#define __NR_syscalls 355
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 8c478c6c6b1..74cb4d72d67 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -376,6 +376,7 @@
#define __NR_process_vm_readv 351
#define __NR_process_vm_writev 352
#define __NR_finit_module 353
+#define __NR_kcmp 354
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index d29facbf9a2..ea847abb0d0 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
+ bl __init_FSCR
bl __init_hvmode_206
mtlr r11
beqlr
@@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
- bl __init_FSCR
bl __init_TLB
mtlr r11
blr
_GLOBAL(__restore_cpu_power8)
mflr r11
+ bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
beqlr
@@ -115,7 +116,7 @@ __init_LPCR:
__init_FSCR:
mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR
+ ori r3,r3,FSCR_TAR|FSCR_DSCR
mtspr SPRN_FSCR,r3
blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a8a5361fb70..87ef8f5ee5b 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
mflr r10 ; \
ld r12,PACAKBASE(r13) ; \
LOAD_HANDLER(r12, system_call_entry_direct) ; \
- mtlr r12 ; \
+ mtctr r12 ; \
mfspr r12,SPRN_SRR1 ; \
/* Re-use of r13... No spare regs to do this */ \
li r13,MSR_RI ; \
mtmsrd r13,1 ; \
GET_PACA(r13) ; /* get r13 back */ \
- blr ;
+ bctr ;
#else
/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT \
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 863184b182f..3f3bb4cdbbe 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -749,6 +749,7 @@ static struct file_system_type spufs_type = {
.mount = spufs_mount,
.kill_sb = kill_litter_super,
};
+MODULE_ALIAS_FS("spufs");
static int __init spufs_init(void)
{
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index fcf4b4cbeaf..4557e91626c 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <asm/hvcall.h>
#include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
= (unsigned int)last_p_partition_ID;
/* copy the Null-term char too */
- strncpy(&next_partner_info->location_code[0],
+ strlcpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
- strlen((char *)&pi_buff[2]) + 1);
+ sizeof(next_partner_info->location_code));
list_add_tail(&(next_partner_info->node), head);
next_partner_info = NULL;
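
The copy above was previously bounded by the length of the source string (strlen() + 1), which makes strncpy() no bound at all once the source outgrows the destination; strlcpy() is bounded by sizeof() the destination field and guarantees NUL termination when it truncates. A minimal userspace sketch of that behaviour, with a local strlcpy() since glibc does not provide one (the buffer size and sample string are made up):

    #include <stdio.h>
    #include <string.h>

    /* Local stand-in with the usual strlcpy semantics: copy at most size-1
     * bytes, always NUL-terminate, return the full source length. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = (len >= size) ? size - 1 : len;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char location_code[8];                        /* stand-in for the fixed-size field */
        const char *src = "U8204.001.DQDXYTR-V11-C5"; /* invented string, longer than the buffer */

        /* Bounded by the destination, so it cannot overflow and the result
         * is always a valid C string, truncated if necessary. */
        my_strlcpy(location_code, src, sizeof(location_code));
        printf("copied: \"%s\"\n", location_code);
        return 0;
    }
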
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4b505370a1d..eb8fb629f00 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -134,7 +134,7 @@ config S390
select HAVE_SYSCALL_WRAPPERS
select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select INIT_ALL_POSSIBLE
select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 8538015ed4a..5f7d7ba2874 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -456,6 +456,7 @@ static struct file_system_type hypfs_type = {
.mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
+MODULE_ALIAS_FS("s390_hypfs");
static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index f1eddd150dd..c879fad404c 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -12,6 +12,7 @@
#ifndef _ASM_S390_CPU_MF_H
#define _ASM_S390_CPU_MF_H
+#include <linux/errno.h>
#include <asm/facility.h>
#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index e569aa1fd2b..c8def8bc902 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -12,7 +12,7 @@ config SCORE
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select MODULES_USE_ELF_REL
select CLONE_BACKWARDS
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index ff496ab1e79..25877aebc68 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -17,7 +17,7 @@ config TILE
select GENERIC_IRQ_SHOW
select HAVE_DEBUG_BUGVERBOSE
select HAVE_SYSCALL_WRAPPERS if TILEGX
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select SYS_HYPERVISOR
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CLOCKEVENTS
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 001d418a895..78f1f2ded86 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -288,6 +288,9 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
long compat_sys_fallocate(int fd, int mode,
u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi);
+long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
+ unsigned int offset_low, loff_t __user * result,
+ unsigned int origin);
/* Assembly trampoline to avoid clobbering r0. */
long _compat_sys_rt_sigreturn(void);
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 7f72401b4f4..6ea4cdb3c6a 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -32,50 +32,65 @@
* adapt the usual convention.
*/
-long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
+ u32, low, u32, high)
{
return sys_truncate(filename, ((loff_t)high << 32) | low);
}
-long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
+ u32, low, u32, high)
{
return sys_ftruncate(fd, ((loff_t)high << 32) | low);
}
-long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
- u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
+ size_t, count, u32, dummy, u32, low, u32, high)
{
return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
}
-long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
- u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
+ size_t, count, u32, dummy, u32, low, u32, high)
{
return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
}
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len)
+COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, low, u32, high,
+ char __user *, buf, size_t, len)
{
return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
}
-long compat_sys_sync_file_range2(int fd, unsigned int flags,
- u32 offset_lo, u32 offset_hi,
- u32 nbytes_lo, u32 nbytes_hi)
+COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
+ u32, offset_lo, u32, offset_hi,
+ u32, nbytes_lo, u32, nbytes_hi)
{
return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)nbytes_hi << 32) | nbytes_lo,
flags);
}
-long compat_sys_fallocate(int fd, int mode,
- u32 offset_lo, u32 offset_hi,
- u32 len_lo, u32 len_hi)
+COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
+ u32, offset_lo, u32, offset_hi,
+ u32, len_lo, u32, len_hi)
{
return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)len_hi << 32) | len_lo);
}
+/*
+ * Avoid bug in generic sys_llseek() that specifies offset_high and
+ * offset_low as "unsigned long", thus making it possible to pass
+ * a sign-extended high 32 bits in offset_low.
+ */
+COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
+ unsigned int, offset_low, loff_t __user *, result,
+ unsigned int, origin)
+{
+ return sys_llseek(fd, offset_high, offset_low, result, origin);
+}
+
/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
@@ -83,6 +98,7 @@ long compat_sys_fallocate(int fd, int mode,
/* See comments in sys.c */
#define compat_sys_fadvise64_64 sys32_fadvise64_64
#define compat_sys_readahead sys32_readahead
+#define sys_llseek compat_sys_llseek
/* Call the assembly trampolines where necessary. */
#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
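
All of the wrappers above rebuild a 64-bit value from two 32-bit register halves as ((loff_t)high << 32) | low, and the comment explains that the dedicated compat_sys_llseek() exists so the halves arrive as plain unsigned ints instead of possibly sign-extended longs. A rough userspace illustration of that hazard, assuming a 64-bit long (names and values are invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild a 64-bit offset the way the wrappers above do. */
    static int64_t combine(uint32_t high, uint32_t low)
    {
        return ((int64_t)high << 32) | low;
    }

    /* What goes wrong if the low half arrives already sign-extended into a
     * 64-bit long: the OR smears 1-bits across the high word. */
    static int64_t combine_sign_extended(uint32_t high, long low)
    {
        return ((int64_t)high << 32) | low;
    }

    int main(void)
    {
        uint32_t high = 0x1, low = 0x80000000u;    /* intended offset 0x180000000 */

        printf("unsigned low:      0x%llx\n",
               (unsigned long long)combine(high, low));
        printf("sign-extended low: 0x%llx\n",
               (unsigned long long)combine_sign_extended(high, (int32_t)low));
        return 0;
    }
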
diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h
index 78f1b899996..c512b0306dd 100644
--- a/arch/um/drivers/chan.h
+++ b/arch/um/drivers/chan.h
@@ -37,7 +37,7 @@ extern int console_write_chan(struct chan *chan, const char *buf,
extern int console_open_chan(struct line *line, struct console *co);
extern void deactivate_chan(struct chan *chan, int irq);
extern void reactivate_chan(struct chan *chan, int irq);
-extern void chan_enable_winch(struct chan *chan, struct tty_struct *tty);
+extern void chan_enable_winch(struct chan *chan, struct tty_port *port);
extern int enable_chan(struct line *line);
extern void close_chan(struct line *line);
extern int chan_window_size(struct line *line,
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 15c553c239a..80b47cb71e0 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -122,10 +122,10 @@ static int open_chan(struct list_head *chans)
return err;
}
-void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
+void chan_enable_winch(struct chan *chan, struct tty_port *port)
{
if (chan && chan->primary && chan->ops->winch)
- register_winch(chan->fd, tty);
+ register_winch(chan->fd, port);
}
static void line_timer_cb(struct work_struct *work)
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index 9be670ad23b..3fd7c3efdb1 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -216,7 +216,7 @@ static int winch_thread(void *arg)
}
}
-static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
+static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
unsigned long *stack_out)
{
struct winch_data data;
@@ -271,7 +271,7 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
return err;
}
-void register_winch(int fd, struct tty_struct *tty)
+void register_winch(int fd, struct tty_port *port)
{
unsigned long stack;
int pid, thread, count, thread_fd = -1;
@@ -281,17 +281,17 @@ void register_winch(int fd, struct tty_struct *tty)
return;
pid = tcgetpgrp(fd);
- if (is_skas_winch(pid, fd, tty)) {
- register_winch_irq(-1, fd, -1, tty, 0);
+ if (is_skas_winch(pid, fd, port)) {
+ register_winch_irq(-1, fd, -1, port, 0);
return;
}
if (pid == -1) {
- thread = winch_tramp(fd, tty, &thread_fd, &stack);
+ thread = winch_tramp(fd, port, &thread_fd, &stack);
if (thread < 0)
return;
- register_winch_irq(thread_fd, fd, thread, tty, stack);
+ register_winch_irq(thread_fd, fd, thread, port, stack);
count = write(thread_fd, &c, sizeof(c));
if (count != sizeof(c))
diff --git a/arch/um/drivers/chan_user.h b/arch/um/drivers/chan_user.h
index dc693298eb8..03f1b565c5f 100644
--- a/arch/um/drivers/chan_user.h
+++ b/arch/um/drivers/chan_user.h
@@ -38,10 +38,10 @@ extern int generic_window_size(int fd, void *unused, unsigned short *rows_out,
unsigned short *cols_out);
extern void generic_free(void *data);
-struct tty_struct;
-extern void register_winch(int fd, struct tty_struct *tty);
+struct tty_port;
+extern void register_winch(int fd, struct tty_port *port);
extern void register_winch_irq(int fd, int tty_fd, int pid,
- struct tty_struct *tty, unsigned long stack);
+ struct tty_port *port, unsigned long stack);
#define __channel_help(fn, prefix) \
__uml_help(fn, prefix "[0-9]*=<channel description>\n" \
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index f1b38571f94..be541cf69fd 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -305,7 +305,7 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
return ret;
if (!line->sigio) {
- chan_enable_winch(line->chan_out, tty);
+ chan_enable_winch(line->chan_out, port);
line->sigio = 1;
}
@@ -315,8 +315,22 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
return 0;
}
+static void unregister_winch(struct tty_struct *tty);
+
+static void line_destruct(struct tty_port *port)
+{
+ struct tty_struct *tty = tty_port_tty_get(port);
+ struct line *line = tty->driver_data;
+
+ if (line->sigio) {
+ unregister_winch(tty);
+ line->sigio = 0;
+ }
+}
+
static const struct tty_port_operations line_port_ops = {
.activate = line_activate,
+ .destruct = line_destruct,
};
int line_open(struct tty_struct *tty, struct file *filp)
@@ -340,18 +354,6 @@ int line_install(struct tty_driver *driver, struct tty_struct *tty,
return 0;
}
-static void unregister_winch(struct tty_struct *tty);
-
-void line_cleanup(struct tty_struct *tty)
-{
- struct line *line = tty->driver_data;
-
- if (line->sigio) {
- unregister_winch(tty);
- line->sigio = 0;
- }
-}
-
void line_close(struct tty_struct *tty, struct file * filp)
{
struct line *line = tty->driver_data;
@@ -601,7 +603,7 @@ struct winch {
int fd;
int tty_fd;
int pid;
- struct tty_struct *tty;
+ struct tty_port *port;
unsigned long stack;
struct work_struct work;
};
@@ -655,7 +657,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
goto out;
}
}
- tty = winch->tty;
+ tty = tty_port_tty_get(winch->port);
if (tty != NULL) {
line = tty->driver_data;
if (line != NULL) {
@@ -663,6 +665,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
&tty->winsize.ws_col);
kill_pgrp(tty->pgrp, SIGWINCH, 1);
}
+ tty_kref_put(tty);
}
out:
if (winch->fd != -1)
@@ -670,7 +673,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
+void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
unsigned long stack)
{
struct winch *winch;
@@ -685,7 +688,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
.fd = fd,
.tty_fd = tty_fd,
.pid = pid,
- .tty = tty,
+ .port = port,
.stack = stack });
if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
@@ -714,15 +717,18 @@ static void unregister_winch(struct tty_struct *tty)
{
struct list_head *ele, *next;
struct winch *winch;
+ struct tty_struct *wtty;
spin_lock(&winch_handler_lock);
list_for_each_safe(ele, next, &winch_handlers) {
winch = list_entry(ele, struct winch, list);
- if (winch->tty == tty) {
+ wtty = tty_port_tty_get(winch->port);
+ if (wtty == tty) {
free_winch(winch);
break;
}
+ tty_kref_put(wtty);
}
spin_unlock(&winch_handler_lock);
}
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index d8926c30362..39f186252e0 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -218,6 +218,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
len = (*lp->write)(lp->fd, skb, lp);
+ skb_tx_timestamp(skb);
if (len == skb->len) {
dev->stats.tx_packets++;
@@ -281,6 +282,7 @@ static void uml_net_get_drvinfo(struct net_device *dev,
static const struct ethtool_ops uml_net_ethtool_ops = {
.get_drvinfo = uml_net_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static void uml_net_user_timer_expire(unsigned long _conn)
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 16fdd0a0f9d..b8d14fa5205 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -105,7 +105,6 @@ static const struct tty_operations ssl_ops = {
.throttle = line_throttle,
.unthrottle = line_unthrottle,
.install = ssl_install,
- .cleanup = line_cleanup,
.hangup = line_hangup,
};
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 827777af3f6..7b361f36ca9 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -110,7 +110,6 @@ static const struct tty_operations console_ops = {
.set_termios = line_set_termios,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
- .cleanup = line_cleanup,
.hangup = line_hangup,
};
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index b1469fe9329..9d9f1b4bf82 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -15,7 +15,7 @@
#include <sysdep/mcontext.h>
#include "internal.h"
-void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = {
+void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
[SIGFPE] = relay_signal,
[SIGILL] = relay_signal,
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index da4b9e9999f..337518c5042 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -15,6 +15,8 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include <asm/unistd.h>
#include <init.h>
#include <os.h>
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index dc50b157fc8..2943e3acdf0 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -9,7 +9,7 @@ config UNICORE32
select GENERIC_ATOMIC64
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_HAVE_CUSTOM_GPIO_H
select GENERIC_FIND_FIRST_BIT
select GENERIC_IRQ_PROBE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a4f24f5b121..70c0f3da047 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,7 +112,7 @@ config X86
select GENERIC_STRNLEN_USER
select HAVE_CONTEXT_TRACKING if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select MODULES_USE_ELF_REL if X86_32
select MODULES_USE_ELF_RELA if X86_64
select CLONE_BACKWARDS if X86_32
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 5b5e9cb774b..653668d140f 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -14,13 +14,29 @@
* analysis of kexec-tools; if other broken bootloaders initialize a
* different set of fields we will need to figure out how to disambiguate.
*
+ * Note: efi_info is commonly left uninitialized, but that field has a
+ * private magic, so it is better to leave it unchanged.
*/
static void sanitize_boot_params(struct boot_params *boot_params)
{
+ /*
+ * IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
+ * this field. The purpose of this field is to guarantee
+ * compliance with the x86 boot spec located in
+ * Documentation/x86/boot.txt . That spec says that the
+ * *whole* structure should be cleared, after which only the
+ * portion defined by struct setup_header (boot_params->hdr)
+ * should be copied in.
+ *
+ * If you're having an issue because the sentinel is set, you
+ * need to change the whole structure to be cleared, not this
+ * (or any other) individual field, or you will soon have
+ * problems again.
+ */
if (boot_params->sentinel) {
- /*fields in boot_params are not valid, clear them */
+ /* fields in boot_params are left uninitialized, clear them */
memset(&boot_params->olpc_ofw_header, 0,
- (char *)&boot_params->alt_mem_k -
+ (char *)&boot_params->efi_info -
(char *)&boot_params->olpc_ofw_header);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 826054a4f2e..b05a575d56f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -729,3 +729,13 @@ void intel_ds_init(void)
}
}
}
+
+void perf_restore_debug_store(void)
+{
+ struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
+
+ if (!x86_pmu.bts && !x86_pmu.pebs)
+ return;
+
+ wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
+}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 84d32855f65..90d8cc930f5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -171,9 +171,15 @@ static struct resource bss_resource = {
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
+struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
+ .wp_works_ok = -1,
+ .fdiv_bug = -1,
+};
/* common cpu data for all cpus */
-struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
+struct cpuinfo_x86 boot_cpu_data __read_mostly = {
+ .wp_works_ok = -1,
+ .fdiv_bug = -1,
+};
EXPORT_SYMBOL(boot_cpu_data);
unsigned int def_to_bigsmp;
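
The positional initializers being removed depend on the exact member order of struct cpuinfo_x86, so adding or reordering members silently changes which fields receive the -1 markers; the designated form names only the two fields that need a non-zero starting value and leaves the rest zero. A minimal standalone illustration of the idiom (the struct below is a made-up stand-in, not the real cpuinfo_x86):

    #include <stdio.h>

    struct cpu_info {                /* made-up stand-in for struct cpuinfo_x86 */
        int family;
        int model;
        int wp_works_ok;             /* -1 means "not probed yet" */
        int fdiv_bug;                /* -1 means "not probed yet" */
    };

    /* Only the named members get explicit values; the rest are zeroed, and the
     * initializer stays correct if members are added or reordered. */
    static struct cpu_info boot_cpu = {
        .wp_works_ok = -1,
        .fdiv_bug    = -1,
    };

    int main(void)
    {
        printf("family=%d wp_works_ok=%d fdiv_bug=%d\n",
               boot_cpu.family, boot_cpu.wp_works_ok, boot_cpu.fdiv_bug);
        return 0;
    }
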
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a6ceaedc396..9f190a2a00e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1365,9 +1365,8 @@ static inline void mwait_play_dead(void)
unsigned int eax, ebx, ecx, edx;
unsigned int highest_cstate = 0;
unsigned int highest_subcstate = 0;
- int i;
void *mwait_ptr;
- struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
+ int i;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4903a03ae87..59b7fc45327 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -410,9 +410,8 @@ void __init init_mem_mapping(void)
/* the ISA range is always mapped regardless of memory holes */
init_memory_mapping(0, ISA_END_ADDRESS);
- /* xen has big range in reserved near end of ram, skip it at first */
- addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE,
- PAGE_SIZE);
+ /* xen has big range in reserved near end of ram, skip it at first.*/
+ addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
real_end = addr + PMD_SIZE;
/* step_size need to be small so pgt_buf from BRK could cover it */
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2610bd93c89..657438858e8 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -563,6 +563,13 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
if (base > __pa(high_memory-1))
return 0;
+ /*
+ * some areas in the middle of the kernel identity range
+ * are not mapped, like the PCI space.
+ */
+ if (!page_is_ram(base >> PAGE_SHIFT))
+ return 0;
+
id_sz = (__pa(high_memory-1) <= base + size) ?
__pa(high_memory) - base :
size;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 120cee1c3f8..3c68768d7a7 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -11,6 +11,7 @@
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
+#include <linux/perf_event.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
@@ -228,6 +229,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
do_fpu_end();
x86_platform.restore_sched_clock_state();
mtrr_bp_restore();
+ perf_restore_debug_store();
}
/* Needed by apm.c */
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 35876ffac11..b09de49dbec 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -9,7 +9,7 @@ config XTENSA
select HAVE_IDE
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index ef6f155469b..40a84cc6740 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
- if (type && type->bus && type->find_device) {
+ if (type && type->match && type->find_device) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
- printk(KERN_INFO PREFIX "bus type %s registered\n",
- type->bus->name);
+ printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
return 0;
}
return -ENODEV;
@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
down_write(&bus_type_sem);
list_del_init(&type->list);
up_write(&bus_type_sem);
- printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
- type->bus->name);
+ printk(KERN_INFO PREFIX "bus type %s unregistered\n",
+ type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
-static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
+static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
{
struct acpi_bus_type *tmp, *ret = NULL;
- if (!type)
- return NULL;
-
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
- if (tmp->bus == type) {
+ if (tmp->match(dev)) {
ret = tmp;
break;
}
@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
return ret;
}
-static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
-{
- struct acpi_bus_type *tmp;
- int ret = -ENODEV;
-
- down_read(&bus_type_sem);
- list_for_each_entry(tmp, &bus_type_list, list) {
- if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
- ret = 0;
- break;
- }
- }
- up_read(&bus_type_sem);
- return ret;
-}
-
static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
void *addr_p, void **ret_p)
{
@@ -261,29 +241,12 @@ err:
static int acpi_platform_notify(struct device *dev)
{
- struct acpi_bus_type *type;
+ struct acpi_bus_type *type = acpi_get_bus_type(dev);
acpi_handle handle;
int ret;
ret = acpi_bind_one(dev, NULL);
- if (ret && (!dev->bus || !dev->parent)) {
- /* bridge devices genernally haven't bus or parent */
- ret = acpi_find_bridge_device(dev, &handle);
- if (!ret) {
- ret = acpi_bind_one(dev, handle);
- if (ret)
- goto out;
- }
- }
-
- type = acpi_get_bus_type(dev->bus);
- if (ret) {
- if (!type || !type->find_device) {
- DBG("No ACPI bus support for %s\n", dev_name(dev));
- ret = -EINVAL;
- goto out;
- }
-
+ if (ret && type) {
ret = type->find_device(dev, &handle);
if (ret) {
DBG("Unable to get handle for %s\n", dev_name(dev));
@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
{
struct acpi_bus_type *type;
- type = acpi_get_bus_type(dev->bus);
+ type = acpi_get_bus_type(dev);
if (type && type->cleanup)
type->cleanup(dev);
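
The glue.c rework above replaces the stored struct bus_type pointer (and the separate find_bridge path) with a per-registration match(dev) callback, so acpi_get_bus_type() is now keyed on the device alone. A userspace sketch of that registry shape; every name in it is illustrative rather than part of the ACPI API:

    #include <stdio.h>
    #include <string.h>

    struct device { const char *bus_name; };

    /* Each handler supplies a predicate instead of a pointer to compare,
     * mirroring the new acpi_bus_type.match() callback. */
    struct handler {
        const char *name;
        int (*match)(struct device *dev);
    };

    static int pci_match(struct device *dev) { return strcmp(dev->bus_name, "pci") == 0; }
    static int usb_match(struct device *dev) { return strcmp(dev->bus_name, "usb") == 0; }

    static struct handler handlers[] = {
        { "PCI", pci_match },
        { "USB", usb_match },
    };

    static struct handler *find_handler(struct device *dev)
    {
        size_t i;

        for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
            if (handlers[i].match(dev))
                return &handlers[i];
        return NULL;
    }

    int main(void)
    {
        struct device d = { "usb" };
        struct handler *h = find_handler(&d);

        printf("matched: %s\n", h ? h->name : "none");
        return 0;
    }
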
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index eff722278ff..164d49569ae 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
}
exit:
- if (buffer.pointer)
- kfree(buffer.pointer);
+ kfree(buffer.pointer);
return apic_id;
}
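
The if() being dropped above is redundant because kfree(NULL) is defined to do nothing; userspace free() gives the same guarantee, which the trivial program below relies on:

    #include <stdlib.h>

    int main(void)
    {
        void *p = NULL;

        /* free(NULL) is required to be a no-op (C99 7.20.3.2), just as
         * kfree(NULL) is in the kernel, so a preceding NULL check adds nothing. */
        free(p);
        return 0;
    }
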
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index df34bd04ae6..bec717ffd25 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
return 0;
#endif
- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+ BUG_ON(pr->id >= nr_cpu_ids);
/*
* Buggy BIOS check
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 53e7ac9403a..e854582f29a 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -465,7 +465,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
return result;
}
-static int acpi_processor_get_performance_info(struct acpi_processor *pr)
+int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = AE_OK;
@@ -509,7 +509,7 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
#endif
return result;
}
-
+EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
int acpi_processor_notify_smm(struct module *calling_module)
{
acpi_status status;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d3a06a629a..24213033fba 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
status = acpi_get_sleep_type_data(i, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[i] = 1;
- pr_cont(" S%d", i);
}
}
@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
hibernation_set_ops(old_suspend_ordering ?
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1;
- pr_cont(KERN_CONT " S4");
if (nosigcheck)
return;
@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
{
acpi_status status;
u8 type_a, type_b;
+ char supported[ACPI_S_STATE_COUNT * 3 + 1];
+ char *pos = supported;
+ int i;
if (acpi_disabled)
return 0;
@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
acpi_sleep_dmi_check();
sleep_states[ACPI_STATE_S0] = 1;
- pr_info(PREFIX "(supports S0");
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[ACPI_STATE_S5] = 1;
- pr_cont(" S5");
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
}
- pr_cont(")\n");
+
+ supported[0] = 0;
+ for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
+ if (sleep_states[i])
+ pos += sprintf(pos, " S%d", i);
+ }
+ pr_info(PREFIX "(supports%s)\n", supported);
+
/*
* Register the tts_notifier to reboot notifier list so that the _TTS
* object can also be evaluated when the system enters S5.
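
Rather than printing each discovered state with a chain of pr_cont() calls, the code above now collects the names into a buffer sized ACPI_S_STATE_COUNT * 3 + 1 (three characters for each " S%d" entry with a single-digit state, plus the terminating NUL) and prints one line at the end. A standalone sketch of the same accumulation; the state count and support map below are invented:

    #include <stdio.h>

    #define S_STATE_COUNT 6      /* stand-in for ACPI_S_STATE_COUNT (S0..S5) */

    int main(void)
    {
        int sleep_states[S_STATE_COUNT] = { 1, 0, 0, 1, 1, 1 }; /* invented support map */
        char supported[S_STATE_COUNT * 3 + 1];  /* " S%d" is 3 chars per state, +1 for NUL */
        char *pos = supported;
        int i;

        supported[0] = '\0';
        for (i = 0; i < S_STATE_COUNT; i++) {
            if (sleep_states[i])
                pos += sprintf(pos, " S%d", i);
        }
        printf("ACPI: (supports%s)\n", supported);
        return 0;
    }
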
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index 093c4355496..1f44e56cc65 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -158,7 +158,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
static int tegra_ahb_suspend(struct device *dev)
{
int i;
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 0ea1018280b..beea3115577 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
return -ENODEV;
}
-static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
-{
- return -ENODEV;
-}
-
static struct acpi_bus_type ata_acpi_bus = {
- .find_bridge = ata_acpi_find_dummy,
+ .name = "ATA",
.find_device = ata_acpi_find_device,
};
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb..15beb500a4e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
- dev_pm_qos_constraints_init(dev);
mutex_unlock(&dpm_list_mtx);
}
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
- dev_pm_qos_constraints_destroy(dev);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a0a5a..cfc3226ec49 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
{
if (!dev->power.early_init) {
spin_lock_init(&dev->power.lock);
- dev->power.power_state = PMSG_INVALID;
+ dev->power.qos = NULL;
dev->power.early_init = true;
}
}
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_sleep_init(struct device *dev) {}
-static inline void device_pm_add(struct device *dev)
-{
- dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
static inline void device_pm_remove(struct device *dev)
{
- dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev);
}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5..5f74587ef25 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
+#include <linux/err.h>
#include "power.h"
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
struct pm_qos_flags *pqf;
s32 val;
- if (!qos)
+ if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED;
pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
- return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+ return IS_ERR_OR_NULL(dev->power.qos) ?
+ 0 : pm_qos_read_value(&dev->power.qos->latency);
}
/**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
return 0;
}
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
- mutex_lock(&dev_pm_qos_mtx);
- dev->power.qos = NULL;
- dev->power.power_state = PMSG_ON;
- mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
/**
* dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
+ mutex_lock(&dev_pm_qos_mtx);
+
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
- dev_pm_qos_hide_latency_limit(dev);
- dev_pm_qos_hide_flags(dev);
+ __dev_pm_qos_hide_latency_limit(dev);
+ __dev_pm_qos_hide_flags(dev);
- mutex_lock(&dev_pm_qos_mtx);
-
- dev->power.power_state = PMSG_INVALID;
qos = dev->power.qos;
if (!qos)
goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
}
spin_lock_irq(&dev->power.lock);
- dev->power.qos = NULL;
+ dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
"%s() called for already added request\n", __func__))
return -EINVAL;
- req->dev = dev;
-
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.qos) {
- if (dev->power.power_state.event == PM_EVENT_INVALID) {
- /* The device has been removed from the system. */
- req->dev = NULL;
- ret = -ENODEV;
- goto out;
- } else {
- /*
- * Allocate the constraints data on the first call to
- * add_request, i.e. only if the data is not already
- * allocated and if the device has not been removed.
- */
- ret = dev_pm_qos_constraints_allocate(dev);
- }
- }
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
if (!ret) {
+ req->dev = dev;
req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
}
- out:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
s32 curr_value;
int ret = 0;
- if (!req->dev->power.qos)
+ if (!req) /*guard against callers passing in null */
+ return -EINVAL;
+
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
int ret;
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_update_request(req, new_value);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret;
+
if (!req) /*guard against callers passing in null */
return -EINVAL;
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
"%s() called for unknown object\n", __func__))
return -EINVAL;
- mutex_lock(&dev_pm_qos_mtx);
- ret = __dev_pm_qos_update_request(req, new_value);
- mutex_unlock(&dev_pm_qos_mtx);
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
+ return -ENODEV;
+ ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
/**
* dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
- int ret = 0;
-
- if (!req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(!dev_pm_qos_request_active(req),
- "%s() called for unknown object\n", __func__))
- return -EINVAL;
+ int ret;
mutex_lock(&dev_pm_qos_mtx);
-
- if (req->dev->power.qos) {
- ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
- PM_QOS_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- } else {
- /* Return if the device has been removed */
- ret = -ENODEV;
- }
-
+ ret = __dev_pm_qos_remove_request(req);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.qos)
- ret = dev->power.power_state.event != PM_EVENT_INVALID ?
- dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
- if (dev->power.qos)
+ if (!IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister(
dev->power.qos->latency.notifiers,
notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
+ struct dev_pm_qos_request *req = NULL;
+
switch(type) {
case DEV_PM_QOS_LATENCY:
- dev_pm_qos_remove_request(dev->power.qos->latency_req);
+ req = dev->power.qos->latency_req;
dev->power.qos->latency_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
- dev_pm_qos_remove_request(dev->power.qos->flags_req);
+ req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
}
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
}
/**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
- if (dev->power.qos && dev->power.qos->latency_req)
- return -EEXIST;
-
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(req);
return ret;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->latency_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ goto out;
+ }
dev->power.qos->latency_req = req;
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+ pm_qos_sysfs_remove_latency(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ }
+}
+
/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (dev->power.qos && dev->power.qos->latency_req) {
- pm_qos_sysfs_remove_latency(dev);
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
- }
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_latency_limit(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (!device_is_registered(dev))
return -EINVAL;
- if (dev->power.qos && dev->power.qos->flags_req)
- return -EEXIST;
-
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- pm_runtime_get_sync(dev);
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
- if (ret < 0)
- goto fail;
+ if (ret < 0) {
+ kfree(req);
+ return ret;
+ }
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->flags_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ goto out;
+ }
dev->power.qos->flags_req = req;
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-fail:
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+ pm_qos_sysfs_remove_flags(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ }
+}
+
/**
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
* @dev: Device whose PM QoS flags are to be hidden from user space.
*/
void dev_pm_qos_hide_flags(struct device *dev)
{
- if (dev->power.qos && dev->power.qos->flags_req) {
- pm_qos_sysfs_remove_flags(dev);
- pm_runtime_get_sync(dev);
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
- pm_runtime_put(dev);
- }
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_flags(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
s32 value;
int ret;
- if (!dev->power.qos || !dev->power.qos->flags_req)
- return -EINVAL;
-
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);
+ if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
value = dev_pm_qos_requested_flags(dev);
if (set)
value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+ out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
-
return ret;
}
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */
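
The qos.c changes above stop using power.power_state to remember device removal and instead encode three states in the dev->power.qos pointer itself: NULL for not yet allocated, a real pointer while in use, and ERR_PTR(-ENODEV) once dev_pm_qos_constraints_destroy() has run, tested with IS_ERR() and IS_ERR_OR_NULL(). The sketch below reimplements just enough of the <linux/err.h> helpers in userspace to show the encoding; the struct and state names are illustrative:

    #include <stdio.h>
    #include <errno.h>

    /* Minimal userspace stand-ins for the kernel's <linux/err.h> helpers. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)
    {
        return (void *)err;
    }

    static long PTR_ERR(const void *p)
    {
        return (long)p;
    }

    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static int IS_ERR_OR_NULL(const void *p)
    {
        return !p || IS_ERR(p);
    }

    struct qos { int latency; };              /* illustrative stand-in */

    static const char *qos_state(struct qos *q)
    {
        if (IS_ERR(q))
            return "device gone";             /* callers return -ENODEV */
        if (!q)
            return "not allocated yet";       /* allocate on first request */
        return "usable";
    }

    int main(void)
    {
        struct qos q = { 100 };
        struct qos *removed = ERR_PTR(-ENODEV);

        printf("%s\n", qos_state(NULL));
        printf("%s\n", qos_state(&q));
        printf("%s (err=%ld)\n", qos_state(removed), PTR_ERR(removed));
        printf("readable? %s\n", IS_ERR_OR_NULL(removed) ? "no" : "yes");
        return 0;
    }
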
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a..a53ebd26570 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4706c63d0bc..020ea2b9fd2 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
+ pm_runtime_put(map->dev);
return IRQ_NONE;
}
}
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index d3bde6cec92..30629a3d44c 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
return;
}
+ spin_lock_init(&pc_host->cfgspace_lock);
+
pc->host_controller = pc_host;
pc_host->pci_controller.io_resource = &pc_host->io_resource;
pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 1bafb40ec8a..69ae5972713 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -40,6 +40,7 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
static LIST_HEAD(rng_list);
static DEFINE_MUTEX(rng_mutex);
static int data_avail;
-static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
- __cacheline_aligned;
+static u8 *rng_buffer;
+
+static size_t rng_buffer_size(void)
+{
+ return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
+}
static inline int hwrng_init(struct hwrng *rng)
{
@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (!data_avail) {
bytes_read = rng_get_data(current_rng, rng_buffer,
- sizeof(rng_buffer),
+ rng_buffer_size(),
!(filp->f_flags & O_NONBLOCK));
if (bytes_read < 0) {
err = bytes_read;
@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
mutex_lock(&rng_mutex);
+ /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+ err = -ENOMEM;
+ if (!rng_buffer) {
+ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+ if (!rng_buffer)
+ goto out_unlock;
+ }
+
/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 10fd71ccf58..6bf4d47324e 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -92,14 +92,22 @@ static int probe_common(struct virtio_device *vdev)
{
int err;
+ if (vq) {
+ /* We only support one device for now */
+ return -EBUSY;
+ }
/* We expect a single virtqueue. */
vq = virtio_find_single_vq(vdev, random_recv_done, "input");
- if (IS_ERR(vq))
- return PTR_ERR(vq);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ vq = NULL;
+ return err;
+ }
err = hwrng_register(&virtio_hwrng);
if (err) {
vdev->config->del_vqs(vdev);
+ vq = NULL;
return err;
}
@@ -112,6 +120,7 @@ static void remove_common(struct virtio_device *vdev)
busy = false;
hwrng_unregister(&virtio_hwrng);
vdev->config->del_vqs(vdev);
+ vq = NULL;
}
static int virtrng_probe(struct virtio_device *vdev)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 594bda9dcfc..32a6c576495 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -852,6 +852,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
unsigned long flags;
+ int wakeup_write = 0;
/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, flags);
@@ -873,10 +874,8 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
else
r->entropy_count = reserved;
- if (r->entropy_count < random_write_wakeup_thresh) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
- }
+ if (r->entropy_count < random_write_wakeup_thresh)
+ wakeup_write = 1;
}
DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
@@ -884,6 +883,11 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
spin_unlock_irqrestore(&r->lock, flags);
+ if (wakeup_write) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
+
return nbytes;
}
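
The account() change above records the below-threshold condition in a local wakeup_write flag while r->lock is held and performs the wake_up_interruptible()/kill_fasync() only after the spinlock is released. A small pthread sketch of the same defer-past-the-lock shape (build with -pthread; all names and numbers are invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  write_wait = PTHREAD_COND_INITIALIZER;
    static int entropy_count = 10;
    static const int write_wakeup_thresh = 64;

    /* Debit credits under the lock, but notify waiters only after dropping it,
     * mirroring the wakeup_write flag introduced above. */
    static int account(int nbytes)
    {
        int wakeup_write = 0;

        pthread_mutex_lock(&lock);
        entropy_count -= nbytes;
        if (entropy_count < 0)
            entropy_count = 0;
        if (entropy_count < write_wakeup_thresh)
            wakeup_write = 1;
        pthread_mutex_unlock(&lock);

        if (wakeup_write)
            pthread_cond_broadcast(&write_wait);

        return nbytes;
    }

    int main(void)
    {
        int debited = account(4);

        printf("debited %d byte(s), entropy_count now %d\n", debited, entropy_count);
        return 0;
    }
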
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 143ce1f899a..1e2de730536 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -1292,7 +1292,6 @@ static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
TEGRA_CLK_DUPLICATE(cclk, NULL, "cpu"),
- TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL),
TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* Must be the last entry */
};
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 32c61cb6d0b..ba6f51bc9f3 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1931,7 +1931,6 @@ static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
TEGRA_CLK_DUPLICATE(cml1, "tegra_sata_cml", NULL),
TEGRA_CLK_DUPLICATE(cml0, "tegra_pcie", "cml"),
TEGRA_CLK_DUPLICATE(pciex, "tegra_pcie", "pciex"),
- TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL),
TEGRA_CLK_DUPLICATE(vcp, "nvavp", "vcp"),
TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* MUST be the last entry */
};
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index fce2000eec3..1110478dd0f 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
(task_active_pid_ns(current) != &init_pid_ns))
return;
+ /* Can only change if privileged. */
+ if (!capable(CAP_NET_ADMIN)) {
+ err = EPERM;
+ goto out;
+ }
+
mc_op = (enum proc_cn_mcast_op *)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
err = EINVAL;
break;
}
+
+out:
cn_proc_ack(err, msg->seq, msg->ack);
}
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index d2ac9115060..46bde01eee6 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \
* dbs: used as a shortform for demand based switching It helps to keep variable
* names smaller, simpler
* cdbs: common dbs
- * on_*: On-demand governor
+ * od_*: On-demand governor
* cs_*: Conservative governor
*/
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 66e3a71b81a..b61b5a3fad6 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -28,13 +28,7 @@
static int hb_voltage_change(unsigned int freq)
{
- int i;
- u32 msg[HB_CPUFREQ_IPC_LEN];
-
- msg[0] = HB_CPUFREQ_CHANGE_NOTE;
- msg[1] = freq / 1000000;
- for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
- msg[i] = 0;
+ u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
return pl320_ipc_transmit(msg);
}
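
The one-line replacement above relies on the C rule that array elements not covered by a brace-enclosed initializer are implicitly zero-initialized, which is what the removed loop did by hand for msg[2] onwards. A quick standalone check, using stand-in names and sizes rather than the real HB_* constants:

    #include <stdio.h>

    #define MSG_LEN      7          /* stand-in for HB_CPUFREQ_IPC_LEN */
    #define CHANGE_NOTE  0x1u       /* stand-in for HB_CPUFREQ_CHANGE_NOTE */

    int main(void)
    {
        unsigned int freq = 1200000000u;
        /* Elements beyond the two listed are implicitly zero-initialized
         * (C99 6.7.8), which is what lets the patch drop the clearing loop. */
        unsigned int msg[MSG_LEN] = { CHANGE_NOTE, freq / 1000000 };
        int i;

        for (i = 0; i < MSG_LEN; i++)
            printf("msg[%d] = %u\n", i, msg[i]);
        return 0;
    }
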
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 096fde0ebcb..f6dd1e76112 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
intel_pstate_get_min_max(cpu, &min, &max);
limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
.owner = THIS_MODULE,
};
-static void intel_pstate_exit(void)
-{
- int cpu;
-
- sysfs_remove_group(intel_pstate_kobject,
- &intel_pstate_attr_group);
- debugfs_remove_recursive(debugfs_parent);
-
- cpufreq_unregister_driver(&intel_pstate_driver);
-
- if (!all_cpu_data)
- return;
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (all_cpu_data[cpu]) {
- del_timer_sync(&all_cpu_data[cpu]->timer);
- kfree(all_cpu_data[cpu]);
- }
- }
-
- put_online_cpus();
- vfree(all_cpu_data);
-}
-module_exit(intel_pstate_exit);
-
static int __initdata no_load;
static int __init intel_pstate_init(void)
{
- int rc = 0;
+ int cpu, rc = 0;
const struct x86_cpu_id *id;
if (no_load)
@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
intel_pstate_sysfs_expose_params();
return rc;
out:
- intel_pstate_exit();
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ del_timer_sync(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ }
+ }
+
+ put_online_cpus();
+ vfree(all_cpu_data);
return -ENODEV;
}
device_initcall(intel_pstate_init);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 982f1f5f574..4cd392dbf11 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -442,7 +442,6 @@ static int __init dmi_present(const char __iomem *p)
static int __init smbios_present(const char __iomem *p)
{
u8 buf[32];
- int offset = 0;
memcpy_fromio(buf, p, 32);
if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
@@ -461,9 +460,9 @@ static int __init smbios_present(const char __iomem *p)
dmi_ver = 0x0206;
break;
}
- offset = 16;
+ return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16);
}
- return dmi_present(buf + offset);
+ return 1;
}
void __init dmi_scan_machine(void)
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 7320bf89170..fe62aa39223 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -426,6 +426,44 @@ get_var_data(struct efivars *efivars, struct efi_variable *var)
return status;
}
+static efi_status_t
+check_var_size_locked(struct efivars *efivars, u32 attributes,
+ unsigned long size)
+{
+ u64 storage_size, remaining_size, max_size;
+ efi_status_t status;
+ const struct efivar_operations *fops = efivars->ops;
+
+ if (!efivars->ops->query_variable_info)
+ return EFI_UNSUPPORTED;
+
+ status = fops->query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (!storage_size || size > remaining_size || size > max_size ||
+ (remaining_size - size) < (storage_size / 2))
+ return EFI_OUT_OF_RESOURCES;
+
+ return status;
+}
+
+
+static efi_status_t
+check_var_size(struct efivars *efivars, u32 attributes, unsigned long size)
+{
+ efi_status_t status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efivars->lock, flags);
+ status = check_var_size_locked(efivars, attributes, size);
+ spin_unlock_irqrestore(&efivars->lock, flags);
+
+ return status;
+}
+
static ssize_t
efivar_guid_read(struct efivar_entry *entry, char *buf)
{
@@ -547,11 +585,16 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
}
spin_lock_irq(&efivars->lock);
- status = efivars->ops->set_variable(new_var->VariableName,
- &new_var->VendorGuid,
- new_var->Attributes,
- new_var->DataSize,
- new_var->Data);
+
+ status = check_var_size_locked(efivars, new_var->Attributes,
+ new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+
+ if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
+ status = efivars->ops->set_variable(new_var->VariableName,
+ &new_var->VendorGuid,
+ new_var->Attributes,
+ new_var->DataSize,
+ new_var->Data);
spin_unlock_irq(&efivars->lock);
@@ -702,8 +745,7 @@ static ssize_t efivarfs_file_write(struct file *file,
u32 attributes;
struct inode *inode = file->f_mapping->host;
unsigned long datasize = count - sizeof(attributes);
- unsigned long newdatasize;
- u64 storage_size, remaining_size, max_size;
+ unsigned long newdatasize, varsize;
ssize_t bytes = 0;
if (count < sizeof(attributes))
@@ -722,28 +764,18 @@ static ssize_t efivarfs_file_write(struct file *file,
* amounts of memory. Pick a default size of 64K if
* QueryVariableInfo() isn't supported by the firmware.
*/
- spin_lock_irq(&efivars->lock);
-
- if (!efivars->ops->query_variable_info)
- status = EFI_UNSUPPORTED;
- else {
- const struct efivar_operations *fops = efivars->ops;
- status = fops->query_variable_info(attributes, &storage_size,
- &remaining_size, &max_size);
- }
- spin_unlock_irq(&efivars->lock);
+ varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
+ status = check_var_size(efivars, attributes, varsize);
if (status != EFI_SUCCESS) {
if (status != EFI_UNSUPPORTED)
return efi_status_to_err(status);
- remaining_size = 65536;
+ if (datasize > 65536)
+ return -ENOSPC;
}
- if (datasize > remaining_size)
- return -ENOSPC;
-
data = kmalloc(datasize, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -765,6 +797,19 @@ static ssize_t efivarfs_file_write(struct file *file,
*/
spin_lock_irq(&efivars->lock);
+ /*
+ * Ensure that the available space hasn't shrunk below the safe level
+ */
+
+ status = check_var_size_locked(efivars, attributes, varsize);
+
+ if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) {
+ spin_unlock_irq(&efivars->lock);
+ kfree(data);
+
+ return efi_status_to_err(status);
+ }
+
status = efivars->ops->set_variable(var->var.VariableName,
&var->var.VendorGuid,
attributes, datasize,
@@ -929,8 +974,8 @@ static bool efivarfs_valid_name(const char *str, int len)
if (len < GUID_LEN + 2)
return false;
- /* GUID should be right after the first '-' */
- if (s - 1 != strchr(str, '-'))
+ /* GUID must be preceded by a '-' */
+ if (*(s - 1) != '-')
return false;
/*
@@ -1118,15 +1163,22 @@ static struct dentry_operations efivarfs_d_ops = {
static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
{
+ struct dentry *d;
struct qstr q;
+ int err;
q.name = name;
q.len = strlen(name);
- if (efivarfs_d_hash(NULL, NULL, &q))
- return NULL;
+ err = efivarfs_d_hash(NULL, NULL, &q);
+ if (err)
+ return ERR_PTR(err);
+
+ d = d_alloc(parent, &q);
+ if (d)
+ return d;
- return d_alloc(parent, &q);
+ return ERR_PTR(-ENOMEM);
}
static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1136,6 +1188,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
struct efivar_entry *entry, *n;
struct efivars *efivars = &__efivars;
char *name;
+ int err = -ENOMEM;
efivarfs_sb = sb;
@@ -1186,8 +1239,10 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
goto fail_name;
dentry = efivarfs_alloc_dentry(root, name);
- if (!dentry)
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
goto fail_inode;
+ }
/* copied by the above to local storage in the dentry. */
kfree(name);
@@ -1214,7 +1269,7 @@ fail_inode:
fail_name:
kfree(name);
fail:
- return -ENOMEM;
+ return err;
}
static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
@@ -1234,6 +1289,7 @@ static struct file_system_type efivarfs_type = {
.mount = efivarfs_mount,
.kill_sb = efivarfs_kill_sb,
};
+MODULE_ALIAS_FS("efivarfs");
/*
* Handle negative dentry.
@@ -1345,7 +1401,6 @@ static int efi_pstore_write(enum pstore_type_id type,
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
int i, ret = 0;
- u64 storage_space, remaining_space, max_variable_size;
efi_status_t status = EFI_NOT_FOUND;
unsigned long flags;
@@ -1365,11 +1420,11 @@ static int efi_pstore_write(enum pstore_type_id type,
* size: a size of logging data
* DUMP_NAME_LEN * 2: a maximum size of variable name
*/
- status = efivars->ops->query_variable_info(PSTORE_EFI_ATTRIBUTES,
- &storage_space,
- &remaining_space,
- &max_variable_size);
- if (status || remaining_space < size + DUMP_NAME_LEN * 2) {
+
+ status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
+ size + DUMP_NAME_LEN * 2);
+
+ if (status) {
spin_unlock_irqrestore(&efivars->lock, flags);
*id = part;
return -ENOSPC;
@@ -1544,6 +1599,14 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
return -EINVAL;
}
+ status = check_var_size_locked(efivars, new_var->Attributes,
+ new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+
+ if (status && status != EFI_UNSUPPORTED) {
+ spin_unlock_irq(&efivars->lock);
+ return efi_status_to_err(status);
+ }
+
/* now *really* create the variable via EFI */
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
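
The check_var_size_locked() helper added above encodes one rule: a SetVariable call is only attempted if the data fits in the remaining store, fits under the per-variable maximum, and would still leave at least half of the total variable store free. A small stand-alone model of that rule (plain C with example numbers; not the kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the condition used by check_var_size_locked(): reject the write
 * if the store reports no capacity, if the data does not fit, or if
 * accepting it would leave less than half of the total store free. */
static bool would_fit(uint64_t storage, uint64_t remaining,
		      uint64_t max_var, uint64_t size)
{
	if (!storage || size > remaining || size > max_var)
		return false;
	return (remaining - size) >= (storage / 2);
}

int main(void)
{
	/* Example numbers only: 64 KiB store, 40 KiB free, 8 KiB per-variable cap. */
	printf("4 KiB write allowed: %d\n", would_fit(65536, 40960, 8192, 4096));
	printf("16 KiB write allowed: %d\n", would_fit(65536, 40960, 8192, 16384));
	return 0;
}
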
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 6f2306db859..f9dbd503fc4 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
return data & (1 << bit) ? 1 : 0;
}
-static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
+static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
{
- return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
+ return ichx_priv.use_gpio & (1 << (nr / 32));
}
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 7472182967c..61a6fde6c08 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -42,6 +42,7 @@
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
+#include <linux/clk.h>
#include <linux/pinctrl/consumer.h>
/*
@@ -496,6 +497,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
struct resource *res;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ struct clk *clk;
unsigned int ngpios;
int soc_variant;
int i, cpu, id;
@@ -529,6 +531,11 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
return id;
}
+ clk = devm_clk_get(&pdev->dev, NULL);
+ /* Not all SoCs require a clock. */
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+
mvchip->soc_variant = soc_variant;
mvchip->chip.label = dev_name(&pdev->dev);
mvchip->chip.dev = &pdev->dev;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fff9786cdc6..c2534d62911 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
static void gpiod_free(struct gpio_desc *desc);
static int gpiod_direction_input(struct gpio_desc *desc);
static int gpiod_direction_output(struct gpio_desc *desc, int value);
+static int gpiod_get_direction(const struct gpio_desc *desc);
static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
-static int gpiod_get_value_cansleep(struct gpio_desc *desc);
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-static int gpiod_get_value(struct gpio_desc *desc);
+static int gpiod_get_value(const struct gpio_desc *desc);
static void gpiod_set_value(struct gpio_desc *desc, int value);
-static int gpiod_cansleep(struct gpio_desc *desc);
-static int gpiod_to_irq(struct gpio_desc *desc);
+static int gpiod_cansleep(const struct gpio_desc *desc);
+static int gpiod_to_irq(const struct gpio_desc *desc);
static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
static int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
@@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
return 0;
}
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
+static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- return desc->chip;
+ return desc ? desc->chip : NULL;
}
+/* caller holds gpio_lock *OR* gpio is marked as requested */
struct gpio_chip *gpio_to_chip(unsigned gpio)
{
return gpiod_to_chip(gpio_to_desc(gpio));
@@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
}
/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
-static int gpiod_get_direction(struct gpio_desc *desc)
+static int gpiod_get_direction(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
unsigned offset;
@@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
if (status > 0) {
/* GPIOF_DIR_IN, or other positive */
status = 1;
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ /* FLAG_IS_OUT is just a cache of the result of get_direction(),
+ * so it does not affect constness per se */
+ clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
}
if (status == 0) {
/* GPIOF_DIR_OUT */
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
}
return status;
}
@@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct gpio_desc *desc = dev_get_drvdata(dev);
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
goto done;
desc = gpio_to_desc(gpio);
+ /* reject invalid GPIOs */
+ if (!desc) {
+ pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ return -EINVAL;
+ }
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
@@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;
- status = -EINVAL;
-
desc = gpio_to_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
- if (!desc)
- goto done;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ return -EINVAL;
+ }
+
+ status = -EINVAL;
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
@@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
{
int status = -EINVAL;
- if (!desc)
- goto done;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
mutex_lock(&sysfs_lock);
@@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
mutex_unlock(&sysfs_lock);
-done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
struct device *dev = NULL;
int status = -EINVAL;
- if (!desc)
- goto done;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
mutex_lock(&sysfs_lock);
@@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
unlock:
mutex_unlock(&sysfs_lock);
-done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
struct device *dev = NULL;
if (!desc) {
- status = -EINVAL;
- goto done;
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return;
}
mutex_lock(&sysfs_lock);
@@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
device_unregister(dev);
put_device(dev);
}
-done:
+
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
int status = -EPROBE_DEFER;
unsigned long flags;
- spin_lock_irqsave(&gpio_lock, flags);
-
if (!desc) {
- status = -EINVAL;
- goto done;
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
}
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
chip = desc->chip;
if (chip == NULL)
goto done;
@@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
done:
if (status)
pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
- desc ? desc_to_gpio(desc) : -1,
- label ? : "?", status);
+ desc_to_gpio(desc), label ? : "?", status);
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
@@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
int status = -EINVAL;
int offset;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&gpio_lock, flags);
- if (!desc)
- goto fail;
chip = desc->chip;
if (!chip || !chip->get || !chip->direction_input)
goto fail;
@@ -1655,13 +1670,9 @@ lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status) {
- int gpio = -1;
- if (desc)
- gpio = desc_to_gpio(desc);
- pr_debug("%s: gpio-%d status %d\n",
- __func__, gpio, status);
- }
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n", __func__,
+ desc_to_gpio(desc), status);
return status;
}
@@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
int status = -EINVAL;
int offset;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
/* Open drain pin should not be driven to 1 */
if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
return gpiod_direction_input(desc);
@@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
spin_lock_irqsave(&gpio_lock, flags);
- if (!desc)
- goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->direction_output)
goto fail;
@@ -1725,13 +1739,9 @@ lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status) {
- int gpio = -1;
- if (desc)
- gpio = desc_to_gpio(desc);
- pr_debug("%s: gpio-%d status %d\n",
- __func__, gpio, status);
- }
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n", __func__,
+ desc_to_gpio(desc), status);
return status;
}
@@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
int status = -EINVAL;
int offset;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&gpio_lock, flags);
- if (!desc)
- goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->set_debounce)
goto fail;
@@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status) {
- int gpio = -1;
- if (desc)
- gpio = desc_to_gpio(desc);
- pr_debug("%s: gpio-%d status %d\n",
- __func__, gpio, status);
- }
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n", __func__,
+ desc_to_gpio(desc), status);
return status;
}
@@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
* It returns the zero or nonzero value provided by the associated
* gpio_chip.get() method; or zero if no such method is provided.
*/
-static int gpiod_get_value(struct gpio_desc *desc)
+static int gpiod_get_value(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;
+ if (!desc)
+ return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
/* Should be using gpio_get_value_cansleep() */
@@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;
+ if (!desc)
+ return;
chip = desc->chip;
/* Should be using gpio_set_value_cansleep() */
WARN_ON(chip->can_sleep);
@@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
* This is used directly or indirectly to implement gpio_cansleep(). It
* returns nonzero if access reading or writing the GPIO value can sleep.
*/
-static int gpiod_cansleep(struct gpio_desc *desc)
+static int gpiod_cansleep(const struct gpio_desc *desc)
{
+ if (!desc)
+ return 0;
/* only call this on GPIOs that are valid! */
return desc->chip->can_sleep;
}
@@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
* It returns the number of the IRQ signaled by this (input) GPIO,
* or a negative errno.
*/
-static int gpiod_to_irq(struct gpio_desc *desc)
+static int gpiod_to_irq(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int offset;
+ if (!desc)
+ return -EINVAL;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
@@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
* Common examples include ones connected to I2C or SPI chips.
*/
-static int gpiod_get_value_cansleep(struct gpio_desc *desc)
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;
might_sleep_if(extra_checks);
+ if (!desc)
+ return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
value = chip->get ? chip->get(chip, offset) : 0;
@@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
struct gpio_chip *chip;
might_sleep_if(extra_checks);
+ if (!desc)
+ return;
chip = desc->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c5b8c81b944..0a8eceb7590 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
- INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+ INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
+ INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+ INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
+ INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
- INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_modeset_disable(dev);
drm_irq_uninstall(dev);
+ dev_priv->enable_hotplug_processing = false;
}
i915_save_state(dev);
@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
+ /* We need working interrupts for modeset enabling ... */
+ drm_irq_install(dev);
+
intel_modeset_init_hw(dev);
intel_modeset_setup_hw_state(dev, false);
- drm_irq_install(dev);
+
+ /*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+ * bother with the tiny race here where we might lose hotplug
+ * notifications.
+ */
intel_hpd_init(dev);
+ dev_priv->enable_hotplug_processing = true;
}
intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2cd97d1cc92..3c7bb0410b5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 de_iir, gt_iir, de_ier, pm_iir;
+ u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
irqreturn_t ret = IRQ_NONE;
int i;
@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ /* Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+ sde_ier = I915_READ(SDEIER);
+ I915_WRITE(SDEIER, 0);
+ POSTING_READ(SDEIER);
+
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
+ I915_WRITE(SDEIER, sde_ier);
+ POSTING_READ(SDEIER);
return ret;
}
@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pm_iir;
+ u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
atomic_inc(&dev_priv->irq_received);
@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);
+ /* Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+ sde_ier = I915_READ(SDEIER);
+ I915_WRITE(SDEIER, 0);
+ POSTING_READ(SDEIER);
+
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pm_iir = I915_READ(GEN6_PMIIR);
@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
+ I915_WRITE(SDEIER, sde_ier);
+ POSTING_READ(SDEIER);
return ret;
}
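
Both handlers above use the same trick for the south (PCH) interrupts: save SDEIER, write 0 with a posting read so SDEIIR is only consumed once per handler invocation, and restore it at the end so anything queued meanwhile re-raises an interrupt. A toy model of that save/mask/restore sequence (stand-alone C over a fake register file; the helper names and the register index are made up):

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for MMIO space. */
static uint32_t regs[16];
#define SDEIER 3 /* hypothetical index, not the real register offset */

static uint32_t mmio_read(unsigned reg) { return regs[reg]; }
static void mmio_write(unsigned reg, uint32_t val) { regs[reg] = val; }

static void irq_handler_sketch(void)
{
	/* Save the south-display enable mask and clear it, so SDEIIR is only
	 * written once while the handler runs (mirrors the hunks above). */
	uint32_t sde_ier = mmio_read(SDEIER);
	mmio_write(SDEIER, 0);
	(void)mmio_read(SDEIER); /* posting read */

	/* ... ack and handle GT/DE/PM/PCH sources here ... */

	/* Restore the mask; any event that landed in SDEIIR meanwhile will
	 * re-raise an interrupt as soon as the enables come back. */
	mmio_write(SDEIER, sde_ier);
	(void)mmio_read(SDEIER);
}

int main(void)
{
	mmio_write(SDEIER, 0xff);
	irq_handler_sketch();
	printf("SDEIER restored to 0x%x\n", (unsigned)mmio_read(SDEIER));
	return 0;
}
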
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 527b664d343..848992f67d5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1613,9 +1613,9 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
#define ADPA_HSYNC_CNTL_ENABLE 0
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
#define ADPA_VSYNC_ACTIVE_LOW 0
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 969d08c72d1..32a3693905e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
u32 temp;
temp = I915_READ(crt->adpa_reg);
- temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
temp &= ~ADPA_DAC_ENABLE;
I915_WRITE(crt->adpa_reg, temp);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d64af5aa4a1..8d0bac3c35d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
enum port port = intel_dig_port->port;
- bool wait;
uint32_t val;
+ bool wait = false;
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a05ac2c91ba..287b42c9d1a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
+/**
+ * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 cntl = I915_READ(CURCNTR(pipe));
+
+ if ((cntl & CURSOR_MODE) == 0) {
+ u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+ I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+ I915_WRITE(CURCNTR(pipe), cntl);
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+ }
+}
+
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
+ if (IS_G4X(dev))
+ g4x_fixup_plane(dev_priv, pipe);
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_framebuffer *old_fb = crtc->fb;
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- intel_fb = to_intel_framebuffer(crtc->fb);
- work->old_fb_obj = intel_fb->obj;
+ work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_vblank_get(dev, intel_crtc->pipe);
@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->unpin_work = work;
spin_unlock_irqrestore(&dev->event_lock, flags);
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+ crtc->fb = old_fb;
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f61cb7998c7..6f728e5ee79 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq)
- done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ msecs_to_jiffies(10));
else
done = wait_for_atomic(C, 10) == 0;
if (!done)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 61fee7fcdc2..a1794c6df1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* Check if we are enabling RC6 */
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 5ea5033eae0..4d932c46725 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -112,7 +112,6 @@ struct mga_framebuffer {
struct mga_fbdev {
struct drm_fb_helper helper;
struct mga_framebuffer mfb;
- struct list_head fbdev_list;
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 5a88ec51b51..d3dcf54e623 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
int ret;
int data, clock;
+ WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
WREG_DAC(MGA1064_GEN_IO_CTL, 0);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d3d99a28dde..a274b9906ef 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
static int mga_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+ struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+ struct mga_fbdev *mfbdev = mdev->mfbdev;
+ struct drm_fb_helper *fb_helper = &mfbdev->helper;
+ struct drm_fb_helper_connector *fb_helper_conn = NULL;
+ int bpp = 32;
+ int i = 0;
+
/* FIXME: Add bandwidth and g200se limitations */
if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
+ /* Validate the mode input by the user */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (fb_helper->connector_info[i]->connector == connector) {
+ /* Found the helper for this connector */
+ fb_helper_conn = fb_helper->connector_info[i];
+ if (fb_helper_conn->cmdline_mode.specified) {
+ if (fb_helper_conn->cmdline_mode.bpp_specified) {
+ bpp = fb_helper_conn->cmdline_mode.bpp;
+ }
+ }
+ }
+ }
+
+ if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+ if (fb_helper_conn)
+ fb_helper_conn->cmdline_mode.specified = false;
+ return MODE_BAD;
+ }
+
return MODE_OK;
}
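
The new mga_vga_mode_valid() check above multiplies the mode's width, height and bytes per pixel (taking bpp from the kernel command line when one was given) and rejects the mode if that exceeds the adapter's VRAM. The arithmetic, as a quick stand-alone check (example VRAM size only; not the driver code):

#include <stdio.h>

/* Framebuffer footprint rule used by the mgag200 validation above:
 * width * height * bytes-per-pixel must fit in the adapter's VRAM. */
static int mode_fits(unsigned w, unsigned h, unsigned bpp, unsigned long vram)
{
	unsigned long need = (unsigned long)w * h * (bpp / 8);
	return need <= vram;
}

int main(void)
{
	unsigned long vram = 8UL * 1024 * 1024;   /* example: 8 MiB of VRAM */
	printf("1920x1080@32: %s\n", mode_fits(1920, 1080, 32, vram) ? "ok" : "too big");
	printf("2048x1536@32: %s\n", mode_fits(2048, 1536, 32, vram) ? "ok" : "too big");
	return 0;
}
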
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 5fa13267bd9..02e369f8044 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -544,13 +544,13 @@ nv50_disp_curs_ofuncs = {
static void
nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
{
- nv_mask(event->priv, 0x61002c, (1 << head), (1 << head));
+ nv_mask(event->priv, 0x61002c, (4 << head), (4 << head));
}
static void
nv50_disp_base_vblank_disable(struct nouveau_event *event, int head)
{
- nv_mask(event->priv, 0x61002c, (1 << head), (0 << head));
+ nv_mask(event->priv, 0x61002c, (4 << head), 0);
}
static int
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 61cec0f6ff1..4857f913efd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2cc1e6a5eb6..9c41b58d57e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
init->offset += 2;
init_wr32(init, dreg, idata);
- init_mask(init, creg, ~mask, data | idata);
+ init_mask(init, creg, ~mask, data | iaddr);
}
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index a114a0ed7e9..2e98e8a3f1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
/* drop port's i2c subdev refcount, i2c handles this itself */
if (ret == 0) {
list_add_tail(&port->head, &i2c->ports);
+ atomic_dec(&parent->refcount);
atomic_dec(&engine->refcount);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 41241922263..3b6dc883e15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -116,6 +116,11 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
+ /* wait for all activity to stop before releasing notify object, which
+ * may still be in use */
+ if (chan->chan && chan->ntfy)
+ nouveau_channel_idle(chan->chan);
+
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index d28430cd2ba..6e7a55f93a8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -47,6 +47,18 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
if (drm->agp.stat == UNKNOWN) {
if (!nouveau_agpmode)
return false;
+#ifdef __powerpc__
+ /* Disable AGP by default on all PowerPC machines for
+ * now -- At least some UniNorth-2 AGP bridges are
+ * known to be broken: DMA from the host to the card
+ * works just fine, but writeback from the card to the
+ * host goes straight to memory untranslated bypassing
+ * the GATT somehow, making them quite painful to deal
+ * with...
+ */
+ if (nouveau_agpmode == -1)
+ return false;
+#endif
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 11ca82148ed..7ff10711a4d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -801,7 +801,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
stride = 16 * 4;
height = amount / stride;
- if (new_mem->mem_type == TTM_PL_VRAM &&
+ if (old_mem->mem_type == TTM_PL_VRAM &&
nouveau_bo_tile_layout(nvbo)) {
ret = RING_SPACE(chan, 8);
if (ret)
@@ -823,7 +823,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
OUT_RING (chan, 1);
}
- if (old_mem->mem_type == TTM_PL_VRAM &&
+ if (new_mem->mem_type == TTM_PL_VRAM &&
nouveau_bo_tile_layout(nvbo)) {
ret = RING_SPACE(chan, 8);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a6237c9cbbc..2db57990f65 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -55,9 +55,9 @@
/* offsets in shared sync bo of various structures */
#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
-#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
#define EVO_CORE_HANDLE (0xd1500000)
#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
@@ -341,10 +341,8 @@ struct nv50_curs {
struct nv50_sync {
struct nv50_dmac base;
- struct {
- u32 offset;
- u16 value;
- } sem;
+ u32 addr;
+ u32 data;
};
struct nv50_ovly {
@@ -471,13 +469,33 @@ nv50_display_crtc_sema(struct drm_device *dev, int crtc)
return nv50_disp(dev)->sync;
}
+struct nv50_display_flip {
+ struct nv50_disp *disp;
+ struct nv50_sync *chan;
+};
+
+static bool
+nv50_display_flip_wait(void *data)
+{
+ struct nv50_display_flip *flip = data;
+ if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
+ flip->chan->data)
+ return true;
+ usleep_range(1, 2);
+ return false;
+}
+
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
- struct nv50_sync *sync = nv50_sync(crtc);
+ struct nouveau_device *device = nouveau_dev(crtc->dev);
+ struct nv50_display_flip flip = {
+ .disp = nv50_disp(crtc->dev),
+ .chan = nv50_sync(crtc),
+ };
u32 *push;
- push = evo_wait(sync, 8);
+ push = evo_wait(flip.chan, 8);
if (push) {
evo_mthd(push, 0x0084, 1);
evo_data(push, 0x00000000);
@@ -487,8 +505,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
evo_data(push, 0x00000000);
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
- evo_kick(push, sync);
+ evo_kick(push, flip.chan);
}
+
+ nv_wait_cb(device, nv50_display_flip_wait, &flip);
}
int
@@ -496,11 +516,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan, u32 swap_interval)
{
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_sync *sync = nv50_sync(crtc);
+ int head = nv_crtc->index, ret;
u32 *push;
- int ret;
swap_interval <<= 4;
if (swap_interval == 0)
@@ -510,58 +529,64 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (unlikely(push == NULL))
return -EBUSY;
- /* synchronise with the rendering channel, if necessary */
- if (likely(chan)) {
+ if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
+
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
+ OUT_RING (chan, NvEvoSema0 + head);
+ OUT_RING (chan, sync->addr ^ 0x10);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, sync->data + 1);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sync->addr);
+ OUT_RING (chan, sync->data);
+ } else
+ if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr ^ 0x10));
+ OUT_RING (chan, lower_32_bits(addr ^ 0x10));
+ OUT_RING (chan, sync->data + 1);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, sync->data);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
+ } else
+ if (chan) {
+ u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
ret = RING_SPACE(chan, 10);
if (ret)
return ret;
- if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, sync->sem.offset);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
- OUT_RING (chan, sync->sem.offset ^ 0x10);
- OUT_RING (chan, 0x74b1e000);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
- } else
- if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
- u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
- offset += sync->sem.offset;
-
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- OUT_RING (chan, 0x00000002);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x00000001);
- } else {
- u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
- offset += sync->sem.offset;
-
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- OUT_RING (chan, 0x00001002);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x00001001);
- }
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr ^ 0x10));
+ OUT_RING (chan, lower_32_bits(addr ^ 0x10));
+ OUT_RING (chan, sync->data + 1);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
+ NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, sync->data);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
+ NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+ }
+ if (chan) {
+ sync->addr ^= 0x10;
+ sync->data++;
FIRE_RING (chan);
} else {
- nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
- 0xf00d0000 | sync->sem.value);
evo_sync(crtc->dev);
}
@@ -575,9 +600,9 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_data(push, 0x40000000);
}
evo_mthd(push, 0x0088, 4);
- evo_data(push, sync->sem.offset);
- evo_data(push, 0xf00d0000 | sync->sem.value);
- evo_data(push, 0x74b1e000);
+ evo_data(push, sync->addr);
+ evo_data(push, sync->data++);
+ evo_data(push, sync->data);
evo_data(push, NvEvoSync);
evo_mthd(push, 0x00a0, 2);
evo_data(push, 0x00000000);
@@ -605,9 +630,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
evo_kick(push, sync);
-
- sync->sem.offset ^= 0x10;
- sync->sem.value++;
return 0;
}
@@ -1379,7 +1401,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
if (ret)
goto out;
- head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+ head->sync.addr = EVO_FLIP_SEM0(index);
+ head->sync.data = 0x00000000;
/* allocate overlay resources */
ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
@@ -2112,15 +2135,23 @@ nv50_display_fini(struct drm_device *dev)
int
nv50_display_init(struct drm_device *dev)
{
- u32 *push = evo_wait(nv50_mast(dev), 32);
- if (push) {
- evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
- evo_kick(push, nv50_mast(dev));
- return 0;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_crtc *crtc;
+ u32 *push;
+
+ push = evo_wait(nv50_mast(dev), 32);
+ if (!push)
+ return -EBUSY;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nv50_sync *sync = nv50_sync(crtc);
+ nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
}
- return -EBUSY;
+ evo_mthd(push, 0x0088, 1);
+ evo_data(push, NvEvoSync);
+ evo_kick(push, nv50_mast(dev));
+ return 0;
}
void
@@ -2245,6 +2276,7 @@ nv50_display_create(struct drm_device *dev)
NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
dcbe->location, dcbe->type,
ffs(dcbe->or) - 1, ret);
+ ret = 0;
}
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3c38ea46531..305a657bf21 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2438,6 +2438,12 @@ static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
if (tmp & L2_BUSY)
reset_mask |= RADEON_RESET_VMC;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
return reset_mask;
}
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 99fb13286fd..eb8ac315f92 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -834,7 +834,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
__func__, __LINE__, toffset, surf.base_align);
return -EINVAL;
}
- if (moffset & (surf.base_align - 1)) {
+ if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
__func__, __LINE__, moffset, surf.base_align);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7cead763be9..d4c633e1286 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1381,6 +1381,12 @@ static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
if (tmp & L2_BUSY)
reset_mask |= RADEON_RESET_VMC;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
return reset_mask;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6d4b5611daf..0740db3fcd2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1394,6 +1394,12 @@ static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
if (r600_is_display_hung(rdev))
reset_mask |= RADEON_RESET_DISPLAY;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
return reset_mask;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e403bdda58..78edadc9e86 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -970,6 +970,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
found = 1;
}
+ /* quirks */
+ /* Radeon 9100 (R200) */
+ if ((dev->pdev->device == 0x514D) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7149)) {
+ /* vbios value is bad, use the default */
+ found = 0;
+ }
+
if (!found) /* fallback to defaults */
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 167758488ed..66a7f0fd962 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -70,9 +70,10 @@
* 2.27.0 - r600-SI: Add CS ioctl support for async DMA
* 2.28.0 - r600-eg: Add MEM_WRITE packet support
* 2.29.0 - R500 FP16 color clear registers
+ * 2.30.0 - fix for FMASK texturing
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 29
+#define KMS_DRIVER_MINOR 30
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 90374dd7796..48f80cd42d8 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = true;
radeon_irq_set(rdev);
@@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = false;
radeon_irq_set(rdev);
@@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
unsigned long irqflags;
int i;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
@@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
unsigned long irqflags;
int i;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 80979ed951e..9128120da04 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,6 +2284,12 @@ static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
if (tmp & L2_BUSY)
reset_mask |= RADEON_RESET_VMC;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
return reset_mask;
}
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index c92955df065..be1daf7344d 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,7 +4,6 @@ config DRM_TEGRA
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- select DRM_HDMI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 9500f2f3f8f..8758f38c948 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -459,19 +459,25 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
struct hid_device *hdev = djrcv_dev->hdev;
- int sent_bytes;
+ struct hid_report *report;
+ struct hid_report_enum *output_report_enum;
+ u8 *data = (u8 *)(&dj_report->device_index);
+ int i;
- if (!hdev->hid_output_raw_report) {
- dev_err(&hdev->dev, "%s:"
- "hid_output_raw_report is null\n", __func__);
+ output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
+ report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
+
+ if (!report) {
+ dev_err(&hdev->dev, "%s: unable to find dj report\n", __func__);
return -ENODEV;
}
- sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report,
- sizeof(struct dj_report),
- HID_OUTPUT_REPORT);
+ for (i = 0; i < report->field[0]->report_count; i++)
+ report->field[0]->value[i] = data[i];
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
- return (sent_bytes < 0) ? sent_bytes : 0;
+ return 0;
}
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index 41df29f59b0..ebbb9f4f27a 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -422,6 +422,7 @@ static struct attribute *pem_input_attributes[] = {
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
+ NULL
};
static const struct attribute_group pem_input_group = {
@@ -432,6 +433,7 @@ static struct attribute *pem_fan_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
+ NULL
};
static const struct attribute_group pem_fan_group = {
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 9652a2c92a2..6d6130752f9 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -59,10 +59,10 @@ enum chips { ltc2978, ltc3880 };
struct ltc2978_data {
enum chips id;
int vin_min, vin_max;
- int temp_min, temp_max;
+ int temp_min, temp_max[2];
int vout_min[8], vout_max[8];
int iout_max[2];
- int temp2_max[2];
+ int temp2_max;
struct pmbus_driver_info info;
};
@@ -113,9 +113,10 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
ret = pmbus_read_word_data(client, page,
LTC2978_MFR_TEMPERATURE_PEAK);
if (ret >= 0) {
- if (lin11_to_val(ret) > lin11_to_val(data->temp_max))
- data->temp_max = ret;
- ret = data->temp_max;
+ if (lin11_to_val(ret)
+ > lin11_to_val(data->temp_max[page]))
+ data->temp_max[page] = ret;
+ ret = data->temp_max[page];
}
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
@@ -204,10 +205,9 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
ret = pmbus_read_word_data(client, page,
LTC3880_MFR_TEMPERATURE2_PEAK);
if (ret >= 0) {
- if (lin11_to_val(ret)
- > lin11_to_val(data->temp2_max[page]))
- data->temp2_max[page] = ret;
- ret = data->temp2_max[page];
+ if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
+ data->temp2_max = ret;
+ ret = data->temp2_max;
}
break;
case PMBUS_VIRT_READ_VIN_MIN:
@@ -248,11 +248,11 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_VIRT_RESET_IOUT_HISTORY:
- data->iout_max[page] = 0x7fff;
+ data->iout_max[page] = 0x7c00;
ret = ltc2978_clear_peaks(client, page, data->id);
break;
case PMBUS_VIRT_RESET_TEMP2_HISTORY:
- data->temp2_max[page] = 0x7fff;
+ data->temp2_max = 0x7c00;
ret = ltc2978_clear_peaks(client, page, data->id);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
@@ -262,12 +262,12 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
break;
case PMBUS_VIRT_RESET_VIN_HISTORY:
data->vin_min = 0x7bff;
- data->vin_max = 0;
+ data->vin_max = 0x7c00;
ret = ltc2978_clear_peaks(client, page, data->id);
break;
case PMBUS_VIRT_RESET_TEMP_HISTORY:
data->temp_min = 0x7bff;
- data->temp_max = 0x7fff;
+ data->temp_max[page] = 0x7c00;
ret = ltc2978_clear_peaks(client, page, data->id);
break;
default:
@@ -321,12 +321,14 @@ static int ltc2978_probe(struct i2c_client *client,
info = &data->info;
info->write_word_data = ltc2978_write_word_data;
- data->vout_min[0] = 0xffff;
data->vin_min = 0x7bff;
+ data->vin_max = 0x7c00;
data->temp_min = 0x7bff;
- data->temp_max = 0x7fff;
+ for (i = 0; i < ARRAY_SIZE(data->temp_max); i++)
+ data->temp_max[i] = 0x7c00;
+ data->temp2_max = 0x7c00;
- switch (id->driver_data) {
+ switch (data->id) {
case ltc2978:
info->read_word_data = ltc2978_read_word_data;
info->pages = 8;
@@ -336,7 +338,6 @@ static int ltc2978_probe(struct i2c_client *client,
for (i = 1; i < 8; i++) {
info->func[i] = PMBUS_HAVE_VOUT
| PMBUS_HAVE_STATUS_VOUT;
- data->vout_min[i] = 0xffff;
}
break;
case ltc3880:
@@ -352,11 +353,14 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_POUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
- data->vout_min[1] = 0xffff;
+ data->iout_max[0] = 0x7c00;
+ data->iout_max[1] = 0x7c00;
break;
default:
return -ENODEV;
}
+ for (i = 0; i < info->pages; i++)
+ data->vout_min[i] = 0xffff;
return pmbus_do_probe(client, id, info);
}
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 80eef50c50f..9add60920ac 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -766,12 +766,14 @@ static ssize_t pmbus_show_label(struct device *dev,
static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
{
if (data->num_attributes >= data->max_attributes - 1) {
- data->max_attributes += PMBUS_ATTR_ALLOC_SIZE;
- data->group.attrs = krealloc(data->group.attrs,
- sizeof(struct attribute *) *
- data->max_attributes, GFP_KERNEL);
- if (data->group.attrs == NULL)
+ int new_max_attrs = data->max_attributes + PMBUS_ATTR_ALLOC_SIZE;
+ void *new_attrs = krealloc(data->group.attrs,
+ new_max_attrs * sizeof(void *),
+ GFP_KERNEL);
+ if (!new_attrs)
return -ENOMEM;
+ data->group.attrs = new_attrs;
+ data->max_attributes = new_max_attrs;
}
data->group.attrs[data->num_attributes++] = attr;
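
The pmbus_add_attribute() change above avoids a classic krealloc pitfall: assigning the return value straight to data->group.attrs would lose the old array if the allocation failed, because krealloc(), like realloc(), leaves the original block untouched on failure but returns NULL. The same pattern in user space (a sketch with invented names):

#include <stdio.h>
#include <stdlib.h>

/* Grow a buffer without losing the original allocation when realloc fails. */
static int grow(int **buf, size_t *cap, size_t new_cap)
{
	int *tmp = realloc(*buf, new_cap * sizeof(**buf));

	if (!tmp)
		return -1;          /* *buf is still valid, nothing leaked */
	*buf = tmp;
	*cap = new_cap;
	return 0;
}

int main(void)
{
	size_t cap = 4;
	int *attrs = calloc(cap, sizeof(*attrs));

	if (!attrs)
		return 1;
	if (grow(&attrs, &cap, cap + 32) == 0)
		printf("capacity is now %zu\n", cap);
	free(attrs);
	return 0;
}
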
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index bfe326e896d..2507f902fb7 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -965,7 +965,13 @@ static int sht15_probe(struct platform_device *pdev)
if (voltage)
data->supply_uv = voltage;
- regulator_enable(data->reg);
+ ret = regulator_enable(data->reg);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
/*
* Setup a notifier block to update this if another device
* causes the voltage to change
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 0198324a8b0..bd33473f8e3 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -62,7 +62,7 @@ st_sensors_match_odr_error:
int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
{
int err;
- struct st_sensor_odr_avl odr_out;
+ struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
err = st_sensors_match_odr(sdata->sensor, odr, &odr_out);
@@ -114,7 +114,7 @@ st_sensors_match_odr_error:
static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
{
- int err, i;
+ int err, i = 0;
struct st_sensor_data *sdata = iio_priv(indio_dev);
err = st_sensors_match_fs(sdata->sensor, fs, &i);
@@ -139,14 +139,13 @@ st_accel_set_fullscale_error:
int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
{
- bool found;
u8 tmp_value;
int err = -EINVAL;
- struct st_sensor_odr_avl odr_out;
+ bool found = false;
+ struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
if (enable) {
- found = false;
tmp_value = sdata->sensor->pw.value_on;
if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
(sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 2fe1d4edcb2..74f2d52795f 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -27,7 +27,6 @@
#define AD5064_ADDR(x) ((x) << 20)
#define AD5064_CMD(x) ((x) << 24)
-#define AD5064_ADDR_DAC(chan) (chan)
#define AD5064_ADDR_ALL_DAC 0xF
#define AD5064_CMD_WRITE_INPUT_N 0x0
@@ -131,15 +130,15 @@ static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
}
static int ad5064_sync_powerdown_mode(struct ad5064_state *st,
- unsigned int channel)
+ const struct iio_chan_spec *chan)
{
unsigned int val;
int ret;
- val = (0x1 << channel);
+ val = (0x1 << chan->address);
- if (st->pwr_down[channel])
- val |= st->pwr_down_mode[channel] << 8;
+ if (st->pwr_down[chan->channel])
+ val |= st->pwr_down_mode[chan->channel] << 8;
ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0);
@@ -169,7 +168,7 @@ static int ad5064_set_powerdown_mode(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
st->pwr_down_mode[chan->channel] = mode + 1;
- ret = ad5064_sync_powerdown_mode(st, chan->channel);
+ ret = ad5064_sync_powerdown_mode(st, chan);
mutex_unlock(&indio_dev->mlock);
return ret;
@@ -205,7 +204,7 @@ static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
st->pwr_down[chan->channel] = pwr_down;
- ret = ad5064_sync_powerdown_mode(st, chan->channel);
+ ret = ad5064_sync_powerdown_mode(st, chan);
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
@@ -258,7 +257,7 @@ static int ad5064_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val > (1 << chan->scan_type.realbits) || val < 0)
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
mutex_lock(&indio_dev->mlock);
@@ -292,34 +291,44 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
{ },
};
-#define AD5064_CHANNEL(chan, bits) { \
+#define AD5064_CHANNEL(chan, addr, bits) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
.channel = (chan), \
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
- .address = AD5064_ADDR_DAC(chan), \
+ .address = addr, \
.scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \
.ext_info = ad5064_ext_info, \
}
#define DECLARE_AD5064_CHANNELS(name, bits) \
const struct iio_chan_spec name[] = { \
- AD5064_CHANNEL(0, bits), \
- AD5064_CHANNEL(1, bits), \
- AD5064_CHANNEL(2, bits), \
- AD5064_CHANNEL(3, bits), \
- AD5064_CHANNEL(4, bits), \
- AD5064_CHANNEL(5, bits), \
- AD5064_CHANNEL(6, bits), \
- AD5064_CHANNEL(7, bits), \
+ AD5064_CHANNEL(0, 0, bits), \
+ AD5064_CHANNEL(1, 1, bits), \
+ AD5064_CHANNEL(2, 2, bits), \
+ AD5064_CHANNEL(3, 3, bits), \
+ AD5064_CHANNEL(4, 4, bits), \
+ AD5064_CHANNEL(5, 5, bits), \
+ AD5064_CHANNEL(6, 6, bits), \
+ AD5064_CHANNEL(7, 7, bits), \
+}
+
+#define DECLARE_AD5065_CHANNELS(name, bits) \
+const struct iio_chan_spec name[] = { \
+ AD5064_CHANNEL(0, 0, bits), \
+ AD5064_CHANNEL(1, 3, bits), \
}
static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
+static DECLARE_AD5065_CHANNELS(ad5025_channels, 12);
+static DECLARE_AD5065_CHANNELS(ad5045_channels, 14);
+static DECLARE_AD5065_CHANNELS(ad5065_channels, 16);
+
static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
[ID_AD5024] = {
.shared_vref = false,
@@ -328,7 +337,7 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
},
[ID_AD5025] = {
.shared_vref = false,
- .channels = ad5024_channels,
+ .channels = ad5025_channels,
.num_channels = 2,
},
[ID_AD5044] = {
@@ -338,7 +347,7 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
},
[ID_AD5045] = {
.shared_vref = false,
- .channels = ad5044_channels,
+ .channels = ad5045_channels,
.num_channels = 2,
},
[ID_AD5064] = {
@@ -353,7 +362,7 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
},
[ID_AD5065] = {
.shared_vref = false,
- .channels = ad5064_channels,
+ .channels = ad5065_channels,
.num_channels = 2,
},
[ID_AD5628_1] = {
@@ -429,6 +438,7 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
{
struct iio_dev *indio_dev;
struct ad5064_state *st;
+ unsigned int midscale;
unsigned int i;
int ret;
@@ -465,11 +475,6 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
goto error_free_reg;
}
- for (i = 0; i < st->chip_info->num_channels; ++i) {
- st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K;
- st->dac_cache[i] = 0x8000;
- }
-
indio_dev->dev.parent = dev;
indio_dev->name = name;
indio_dev->info = &ad5064_info;
@@ -477,6 +482,13 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ midscale = (1 << indio_dev->channels[0].scan_type.realbits) / 2;
+
+ for (i = 0; i < st->chip_info->num_channels; ++i) {
+ st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K;
+ st->dac_cache[i] = midscale;
+ }
+
ret = iio_device_register(indio_dev);
if (ret)
goto error_disable_reg;
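
Moving the cache initialisation after the channel table is assigned lets the driver derive the power-on mid-scale code from the channel's real resolution instead of hard-coding 0x8000. A throwaway check of that arithmetic (plain C, not driver code):

#include <stdio.h>

/* Mid-scale code for a DAC with 'realbits' of resolution: half of full
 * scale, as computed in the probe hunk above. Illustration only. */
static unsigned int dac_midscale(unsigned int realbits)
{
	return (1u << realbits) / 2;
}

int main(void)
{
	/* 12-, 14- and 16-bit parts give 0x800, 0x2000 and 0x8000. */
	printf("%#x %#x %#x\n", dac_midscale(12), dac_midscale(14),
	       dac_midscale(16));
	return 0;
}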
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index b5cfa3a354c..361b2328453 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -5,6 +5,7 @@
config INV_MPU6050_IIO
tristate "Invensense MPU6050 devices"
depends on I2C && SYSFS
+ select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
This driver supports the Invensense MPU6050 devices.
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index a479375a8fd..e0c404bdc4a 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -410,6 +410,7 @@ static struct file_system_type ipathfs_fs_type = {
.mount = ipathfs_mount,
.kill_sb = ipathfs_kill_super,
};
+MODULE_ALIAS_FS("ipathfs");
int __init ipath_init_ipathfs(void)
{
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index e0d79b2395e..add98d01476 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -362,7 +362,6 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
INIT_LIST_HEAD(&dev->sriov.cm_list);
dev->sriov.sl_id_map = RB_ROOT;
idr_init(&dev->sriov.pv_id_table);
- idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
}
/* slave = -1 ==> all slaves */
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 644bd6f6467..f247fc6e618 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -604,6 +604,7 @@ static struct file_system_type qibfs_fs_type = {
.mount = qibfs_mount,
.kill_sb = qibfs_kill_super,
};
+MODULE_ALIAS_FS("ipathfs");
int __init qib_init_qibfs(void)
{
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 2fb0d76a04c..208de7cbb7f 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -70,8 +70,6 @@
#define TC3589x_EVT_INT_CLR 0x2
#define TC3589x_KBD_INT_CLR 0x1
-#define TC3589x_KBD_KEYMAP_SIZE 64
-
/**
* struct tc_keypad - data structure used by keypad driver
* @tc3589x: pointer to tc35893
@@ -88,7 +86,7 @@ struct tc_keypad {
const struct tc3589x_keypad_platform_data *board;
unsigned int krow;
unsigned int kcol;
- unsigned short keymap[TC3589x_KBD_KEYMAP_SIZE];
+ unsigned short *keymap;
bool keypad_stopped;
};
@@ -338,12 +336,14 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL,
- keypad->keymap, input);
+ NULL, input);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
goto err_free_mem;
}
+ keypad->keymap = input->keycode;
+
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7b99fc7c943..0238e0e1433 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -490,6 +490,29 @@ static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
f->y_map |= (p[5] & 0x20) << 6;
}
+static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p)
+{
+ f->first_mp = !!(p[0] & 0x02);
+ f->is_mp = !!(p[0] & 0x20);
+
+ f->fingers = ((p[0] & 0x6) >> 1 |
+ (p[0] & 0x10) >> 2);
+ f->x_map = ((p[2] & 0x60) >> 5) |
+ ((p[4] & 0x7f) << 2) |
+ ((p[5] & 0x7f) << 9) |
+ ((p[3] & 0x07) << 16) |
+ ((p[3] & 0x70) << 15) |
+ ((p[0] & 0x01) << 22);
+ f->y_map = (p[1] & 0x7f) |
+ ((p[2] & 0x1f) << 7);
+
+ f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
+ f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
+ f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;
+
+ alps_decode_buttons_v3(f, p);
+}
+
static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
@@ -874,7 +897,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
}
/* Bytes 2 - pktsize should have 0 in the highest bit */
- if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
+ if (priv->proto_version != ALPS_PROTO_V5 &&
+ psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
(psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
psmouse->pktcnt - 1,
@@ -994,8 +1018,7 @@ static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
return 0;
}
-static int alps_enter_command_mode(struct psmouse *psmouse,
- unsigned char *resp)
+static int alps_enter_command_mode(struct psmouse *psmouse)
{
unsigned char param[4];
@@ -1004,14 +1027,12 @@ static int alps_enter_command_mode(struct psmouse *psmouse,
return -1;
}
- if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) {
+ if ((param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) &&
+ param[0] != 0x73) {
psmouse_dbg(psmouse,
"unknown response while entering command mode\n");
return -1;
}
-
- if (resp)
- *resp = param[2];
return 0;
}
@@ -1176,7 +1197,7 @@ static int alps_passthrough_mode_v3(struct psmouse *psmouse,
{
int reg_val, ret = -1;
- if (alps_enter_command_mode(psmouse, NULL))
+ if (alps_enter_command_mode(psmouse))
return -1;
reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008);
@@ -1216,7 +1237,7 @@ static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base)
{
int ret = -EIO, reg_val;
- if (alps_enter_command_mode(psmouse, NULL))
+ if (alps_enter_command_mode(psmouse))
goto error;
reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08);
@@ -1279,7 +1300,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
* supported by this driver. If bit 1 isn't set the packet
* format is different.
*/
- if (alps_enter_command_mode(psmouse, NULL) ||
+ if (alps_enter_command_mode(psmouse) ||
alps_command_mode_write_reg(psmouse,
reg_base + 0x08, 0x82) ||
alps_exit_command_mode(psmouse))
@@ -1306,7 +1327,7 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO)
goto error;
- if (alps_enter_command_mode(psmouse, NULL) ||
+ if (alps_enter_command_mode(psmouse) ||
alps_absolute_mode_v3(psmouse)) {
psmouse_err(psmouse, "Failed to enter absolute mode\n");
goto error;
@@ -1381,7 +1402,7 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
priv->flags &= ~ALPS_DUALPOINT;
}
- if (alps_enter_command_mode(psmouse, NULL) ||
+ if (alps_enter_command_mode(psmouse) ||
alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 ||
alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
goto error;
@@ -1431,7 +1452,7 @@ static int alps_hw_init_v4(struct psmouse *psmouse)
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[4];
- if (alps_enter_command_mode(psmouse, NULL))
+ if (alps_enter_command_mode(psmouse))
goto error;
if (alps_absolute_mode_v4(psmouse)) {
@@ -1499,6 +1520,23 @@ error:
return -1;
}
+static int alps_hw_init_dolphin_v1(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[2];
+
+ /* This is dolphin "v1" as empirically defined by florin9doi */
+ param[0] = 0x64;
+ param[1] = 0x28;
+
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) ||
+ ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE))
+ return -1;
+
+ return 0;
+}
+
static void alps_set_defaults(struct alps_data *priv)
{
priv->byte0 = 0x8f;
@@ -1532,6 +1570,21 @@ static void alps_set_defaults(struct alps_data *priv)
priv->nibble_commands = alps_v4_nibble_commands;
priv->addr_command = PSMOUSE_CMD_DISABLE;
break;
+ case ALPS_PROTO_V5:
+ priv->hw_init = alps_hw_init_dolphin_v1;
+ priv->process_packet = alps_process_packet_v3;
+ priv->decode_fields = alps_decode_dolphin;
+ priv->set_abs_params = alps_set_abs_params_mt;
+ priv->nibble_commands = alps_v3_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+ priv->byte0 = 0xc8;
+ priv->mask0 = 0xc8;
+ priv->flags = 0;
+ priv->x_max = 1360;
+ priv->y_max = 660;
+ priv->x_bits = 23;
+ priv->y_bits = 12;
+ break;
}
}
@@ -1592,6 +1645,12 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
if (alps_match_table(psmouse, priv, e7, ec) == 0) {
return 0;
+ } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
+ ec[0] == 0x73 && ec[1] == 0x01) {
+ priv->proto_version = ALPS_PROTO_V5;
+ alps_set_defaults(priv);
+
+ return 0;
} else if (ec[0] == 0x88 && ec[1] == 0x08) {
priv->proto_version = ALPS_PROTO_V3;
alps_set_defaults(priv);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 970480551b6..eee59853b9c 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -16,6 +16,7 @@
#define ALPS_PROTO_V2 2
#define ALPS_PROTO_V3 3
#define ALPS_PROTO_V4 4
+#define ALPS_PROTO_V5 5
/**
* struct alps_model_info - touchpad ID table
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 1673dc6c809..f51765fff05 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -236,6 +236,13 @@ static int cypress_read_fw_version(struct psmouse *psmouse)
cytp->fw_version = param[2] & FW_VERSION_MASX;
cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0;
+ /*
+ * Trackpad fw_version 11 (in Dell XPS12) yields a bogus response to
+ * CYTP_CMD_READ_TP_METRICS so do not try to use it. LP: #1103594.
+ */
+ if (cytp->fw_version >= 11)
+ cytp->tp_metrics_supported = 0;
+
psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version);
psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n",
cytp->tp_metrics_supported);
@@ -258,6 +265,9 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse)
cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
+ if (!cytp->tp_metrics_supported)
+ return 0;
+
memset(param, 0, sizeof(param));
if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) {
/* Update trackpad parameters. */
@@ -315,18 +325,15 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse)
static int cypress_query_hardware(struct psmouse *psmouse)
{
- struct cytp_data *cytp = psmouse->private;
int ret;
ret = cypress_read_fw_version(psmouse);
if (ret)
return ret;
- if (cytp->tp_metrics_supported) {
- ret = cypress_read_tp_metrics(psmouse);
- if (ret)
- return ret;
- }
+ ret = cypress_read_tp_metrics(psmouse);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 41b6fbf6011..1daa97913b7 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2017,6 +2017,9 @@ static const struct wacom_features wacom_features_0x100 =
static const struct wacom_features wacom_features_0x101 =
{ "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10D =
+ { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x4001 =
{ "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2201,6 +2204,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xEF) },
{ USB_DEVICE_WACOM(0x100) },
{ USB_DEVICE_WACOM(0x101) },
+ { USB_DEVICE_WACOM(0x10D) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 4f702b3ec1a..434c3df250c 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -236,7 +236,12 @@ static void __ads7846_disable(struct ads7846 *ts)
/* Must be called with ts->lock held */
static void __ads7846_enable(struct ads7846 *ts)
{
- regulator_enable(ts->reg);
+ int error;
+
+ error = regulator_enable(ts->reg);
+ if (error != 0)
+ dev_err(&ts->spi->dev, "Failed to enable supply: %d\n", error);
+
ads7846_restart(ts);
}
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index d04f810cb1d..59aa24002c7 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -176,11 +176,17 @@
/* Define for MXT_GEN_COMMAND_T6 */
#define MXT_BOOT_VALUE 0xa5
#define MXT_BACKUP_VALUE 0x55
-#define MXT_BACKUP_TIME 25 /* msec */
-#define MXT_RESET_TIME 65 /* msec */
+#define MXT_BACKUP_TIME 50 /* msec */
+#define MXT_RESET_TIME 200 /* msec */
#define MXT_FWRESET_TIME 175 /* msec */
+/* MXT_SPT_GPIOPWM_T19 field */
+#define MXT_GPIO0_MASK 0x04
+#define MXT_GPIO1_MASK 0x08
+#define MXT_GPIO2_MASK 0x10
+#define MXT_GPIO3_MASK 0x20
+
/* Command to unlock bootloader */
#define MXT_UNLOCK_CMD_MSB 0xaa
#define MXT_UNLOCK_CMD_LSB 0xdc
@@ -212,6 +218,8 @@
/* Touchscreen absolute values */
#define MXT_MAX_AREA 0xff
+#define MXT_PIXELS_PER_MM 20
+
struct mxt_info {
u8 family_id;
u8 variant_id;
@@ -243,6 +251,8 @@ struct mxt_data {
const struct mxt_platform_data *pdata;
struct mxt_object *object_table;
struct mxt_info info;
+ bool is_tp;
+
unsigned int irq;
unsigned int max_x;
unsigned int max_y;
@@ -251,6 +261,7 @@ struct mxt_data {
u8 T6_reportid;
u8 T9_reportid_min;
u8 T9_reportid_max;
+ u8 T19_reportid;
};
static bool mxt_object_readable(unsigned int type)
@@ -502,6 +513,21 @@ static int mxt_write_object(struct mxt_data *data,
return mxt_write_reg(data->client, reg + offset, val);
}
+static void mxt_input_button(struct mxt_data *data, struct mxt_message *message)
+{
+ struct input_dev *input = data->input_dev;
+ bool button;
+ int i;
+
+ /* Active-low switch */
+ for (i = 0; i < MXT_NUM_GPIO; i++) {
+ if (data->pdata->key_map[i] == KEY_RESERVED)
+ continue;
+ button = !(message->message[0] & MXT_GPIO0_MASK << i);
+ input_report_key(input, data->pdata->key_map[i], button);
+ }
+}
+
static void mxt_input_touchevent(struct mxt_data *data,
struct mxt_message *message, int id)
{
@@ -585,6 +611,9 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
int id = reportid - data->T9_reportid_min;
mxt_input_touchevent(data, &message, id);
update_input = true;
+ } else if (message.reportid == data->T19_reportid) {
+ mxt_input_button(data, &message);
+ update_input = true;
} else {
mxt_dump_message(dev, &message);
}
@@ -764,6 +793,9 @@ static int mxt_get_object_table(struct mxt_data *data)
data->T9_reportid_min = min_id;
data->T9_reportid_max = max_id;
break;
+ case MXT_SPT_GPIOPWM_T19:
+ data->T19_reportid = min_id;
+ break;
}
}
@@ -777,7 +809,7 @@ static void mxt_free_object_table(struct mxt_data *data)
data->T6_reportid = 0;
data->T9_reportid_min = 0;
data->T9_reportid_max = 0;
-
+ data->T19_reportid = 0;
}
static int mxt_initialize(struct mxt_data *data)
@@ -1115,9 +1147,13 @@ static int mxt_probe(struct i2c_client *client,
goto err_free_mem;
}
- input_dev->name = "Atmel maXTouch Touchscreen";
+ data->is_tp = pdata && pdata->is_tp;
+
+ input_dev->name = (data->is_tp) ? "Atmel maXTouch Touchpad" :
+ "Atmel maXTouch Touchscreen";
snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0",
client->adapter->nr, client->addr);
+
input_dev->phys = data->phys;
input_dev->id.bustype = BUS_I2C;
@@ -1140,6 +1176,29 @@ static int mxt_probe(struct i2c_client *client,
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
+ if (data->is_tp) {
+ int i;
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+
+ for (i = 0; i < MXT_NUM_GPIO; i++)
+ if (pdata->key_map[i] != KEY_RESERVED)
+ __set_bit(pdata->key_map[i], input_dev->keybit);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_QUINTTAP, input_dev->keybit);
+
+ input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X,
+ MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
+ MXT_PIXELS_PER_MM);
+ }
+
/* For single touch */
input_set_abs_params(input_dev, ABS_X,
0, data->max_x, 0, 0);
@@ -1258,6 +1317,7 @@ static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
static const struct i2c_device_id mxt_id[] = {
{ "qt602240_ts", 0 },
{ "atmel_mxt_ts", 0 },
+ { "atmel_mxt_tp", 0 },
{ "mXT224", 0 },
{ }
};
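
The T19 handling above treats the GPIO bits as active-low buttons: a cleared bit in the status byte means the key is pressed. A stand-alone sketch of that loop, with the bit positions and GPIO count assumed to match the MXT_GPIO*_MASK defines:

#include <stdio.h>

#define GPIO0_MASK 0x04	/* assumed to mirror MXT_GPIO0_MASK */
#define NUM_GPIO   4	/* assumed GPIO count */

/* Active-low decode: a zero bit reports the button as pressed. */
static void report_buttons(unsigned char status)
{
	int i;

	for (i = 0; i < NUM_GPIO; i++) {
		int pressed = !(status & (GPIO0_MASK << i));
		printf("button %d %s\n", i, pressed ? "down" : "up");
	}
}

int main(void)
{
	report_buttons(0x38);	/* GPIO0 bit clear -> first button pressed */
	return 0;
}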
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 4a29ddf6bf1..1443532fe6c 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -314,15 +314,27 @@ static int mms114_start(struct mms114_data *data)
struct i2c_client *client = data->client;
int error;
- if (data->core_reg)
- regulator_enable(data->core_reg);
- if (data->io_reg)
- regulator_enable(data->io_reg);
+ error = regulator_enable(data->core_reg);
+ if (error) {
+ dev_err(&client->dev, "Failed to enable avdd: %d\n", error);
+ return error;
+ }
+
+ error = regulator_enable(data->io_reg);
+ if (error) {
+ dev_err(&client->dev, "Failed to enable vdd: %d\n", error);
+ regulator_disable(data->core_reg);
+ return error;
+ }
+
mdelay(MMS114_POWERON_DELAY);
error = mms114_setup_regs(data);
- if (error < 0)
+ if (error < 0) {
+ regulator_disable(data->io_reg);
+ regulator_disable(data->core_reg);
return error;
+ }
if (data->pdata->cfg_pin)
data->pdata->cfg_pin(true);
@@ -335,16 +347,20 @@ static int mms114_start(struct mms114_data *data)
static void mms114_stop(struct mms114_data *data)
{
struct i2c_client *client = data->client;
+ int error;
disable_irq(client->irq);
if (data->pdata->cfg_pin)
data->pdata->cfg_pin(false);
- if (data->io_reg)
- regulator_disable(data->io_reg);
- if (data->core_reg)
- regulator_disable(data->core_reg);
+ error = regulator_disable(data->io_reg);
+ if (error)
+ dev_warn(&client->dev, "Failed to disable vdd: %d\n", error);
+
+ error = regulator_disable(data->core_reg);
+ if (error)
+ dev_warn(&client->dev, "Failed to disable avdd: %d\n", error);
}
static int mms114_input_open(struct input_dev *dev)
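
The mms114_start() change follows the usual enable-with-rollback pattern: if the second supply fails to come up, the first one is switched back off so the regulator enable counts stay balanced. A hedged userspace sketch with stand-in functions (not the kernel regulator API):

#include <stdio.h>

/* Stand-ins for regulator_enable()/regulator_disable(); hypothetical. */
static int enable_supply(const char *name)   { printf("on  %s\n", name); return 0; }
static void disable_supply(const char *name) { printf("off %s\n", name); }

/* Enable two supplies, unwinding the first if the second fails. */
static int power_up(void)
{
	int err = enable_supply("avdd");
	if (err)
		return err;

	err = enable_supply("vdd");
	if (err) {
		disable_supply("avdd");	/* unwind the first enable */
		return err;
	}
	return 0;
}

int main(void) { return power_up(); }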
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index dc7e478b7e5..e5cdaf87822 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1083,6 +1083,7 @@ static const char *dma_remap_fault_reasons[] =
"non-zero reserved fields in RTP",
"non-zero reserved fields in CTP",
"non-zero reserved fields in PTE",
+ "PCE for translation request specifies blocking",
};
static const char *irq_remap_fault_reasons[] =
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 644d7246842..a32e0d5aa45 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -648,7 +648,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
- map |= 1 << cpu_logical_map(cpu);
+ map |= gic_cpu_map[cpu];
/*
* Ensure that stores to Normal memory are visible to the
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 017c67ea3f4..ead0a4fb744 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
// Allocate URBs and buffers for interrupt endpoint
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- return -ENOMEM;
+ goto err1;
}
intr->urb = urb;
buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
if (!buf) {
- return -ENOMEM;
+ goto err2;
}
endpoint = &altsetting->endpoint[EP_INT-1];
@@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
endpoint->desc.bInterval);
return 0;
+err2:
+ usb_free_urb(intr->urb);
+ intr->urb = NULL;
+err1:
+ usb_free_urb(ctrl->urb);
+ ctrl->urb = NULL;
+
+ return -ENOMEM;
}
/*
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index d8a7d832341..ebaebdf30f9 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -902,7 +902,9 @@ isdn_tty_send_msg(modem_info *info, atemu *m, char *msg)
int j;
int l;
- l = strlen(msg);
+ l = min(strlen(msg), sizeof(cmd.parm) - sizeof(cmd.parm.cmsg)
+ + sizeof(cmd.parm.cmsg.para) - 2);
+
if (!l) {
isdn_tty_modem_result(RESULT_ERROR, info);
return;
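
The min() bound above keeps a caller-supplied string from overrunning the fixed-size cmd.parm buffer. The same clamp-before-copy idea in a self-contained form, with a hypothetical buffer size:

#include <string.h>
#include <stdio.h>

#define PARM_SIZE 50	/* hypothetical capacity of the destination buffer */

/* Clamp the copy length to what the destination can hold, leaving room
 * for a terminator. Illustration only, not the driver code. */
static size_t bounded_copy(char *dst, const char *msg)
{
	size_t l = strlen(msg);

	if (l > PARM_SIZE - 1)
		l = PARM_SIZE - 1;
	memcpy(dst, msg, l);
	dst[l] = '\0';
	return l;
}

int main(void)
{
	char buf[PARM_SIZE];

	printf("%zu\n", bounded_copy(buf, "ATD0123456789"));
	return 0;
}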
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index c45b3aedafb..d873cbae2fb 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
-static int __init pl320_probe(struct amba_device *adev,
- const struct amba_id *id)
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e30b490055a..4d8d90b4fe7 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -154,17 +154,6 @@ config MD_RAID456
If unsure, say Y.
-config MULTICORE_RAID456
- bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
- depends on MD_RAID456
- depends on SMP
- depends on EXPERIMENTAL
- ---help---
- Enable the raid456 module to dispatch per-stripe raid operations to a
- thread pool.
-
- If unsure, say N.
-
config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9a01d1e4c78..311e3d35b27 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -91,15 +91,44 @@ static struct raid_type {
{"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
};
+static char *raid10_md_layout_to_format(int layout)
+{
+ /*
+ * Bits 16 and 17 stand for "offset" and "use_far_sets"

+ * Refer to MD's raid10.c for details
+ */
+ if ((layout & 0x10000) && (layout & 0x20000))
+ return "offset";
+
+ if ((layout & 0xFF) > 1)
+ return "near";
+
+ return "far";
+}
+
static unsigned raid10_md_layout_to_copies(int layout)
{
- return layout & 0xFF;
+ if ((layout & 0xFF) > 1)
+ return layout & 0xFF;
+ return (layout >> 8) & 0xFF;
}
static int raid10_format_to_md_layout(char *format, unsigned copies)
{
- /* 1 "far" copy, and 'copies' "near" copies */
- return (1 << 8) | (copies & 0xFF);
+ unsigned n = 1, f = 1;
+
+ if (!strcmp("near", format))
+ n = copies;
+ else
+ f = copies;
+
+ if (!strcmp("offset", format))
+ return 0x30000 | (f << 8) | n;
+
+ if (!strcmp("far", format))
+ return 0x20000 | (f << 8) | n;
+
+ return (f << 8) | n;
}
static struct raid_type *get_raid_type(char *name)
@@ -352,6 +381,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned i, rebuild_cnt = 0;
unsigned rebuilds_per_group, copies, d;
+ unsigned group_size, last_group_start;
for (i = 0; i < rs->md.raid_disks; i++)
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@@ -379,9 +409,6 @@ static int validate_raid_redundancy(struct raid_set *rs)
* as long as the failed devices occur in different mirror
* groups (i.e. different stripes).
*
- * Right now, we only allow for "near" copies. When other
- * formats are added, we will have to check those too.
- *
* When checking "near" format, make sure no adjacent devices
* have failed beyond what can be handled. In addition to the
* simple case where the number of devices is a multiple of the
@@ -391,14 +418,41 @@ static int validate_raid_redundancy(struct raid_set *rs)
* A A B B C
* C D D E E
*/
- for (i = 0; i < rs->md.raid_disks * copies; i++) {
- if (!(i % copies))
+ if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
+ for (i = 0; i < rs->md.raid_disks * copies; i++) {
+ if (!(i % copies))
+ rebuilds_per_group = 0;
+ d = i % rs->md.raid_disks;
+ if ((!rs->dev[d].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+ (++rebuilds_per_group >= copies))
+ goto too_many;
+ }
+ break;
+ }
+
+ /*
+ * When checking "far" and "offset" formats, we need to ensure
+ * that the device that holds its copy is not also dead or
+ * being rebuilt. (Note that "far" and "offset" formats only
+ * support two copies right now. These formats also only ever
+ * use the 'use_far_sets' variant.)
+ *
+ * This check is somewhat complicated by the need to account
+ * for arrays that are not a multiple of (far) copies. This
+ * results in the need to treat the last (potentially larger)
+ * set differently.
+ */
+ group_size = (rs->md.raid_disks / copies);
+ last_group_start = (rs->md.raid_disks / group_size) - 1;
+ last_group_start *= group_size;
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if (!(i % copies) && !(i > last_group_start))
rebuilds_per_group = 0;
- d = i % rs->md.raid_disks;
- if ((!rs->dev[d].rdev.sb_page ||
- !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+ if ((!rs->dev[i].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
(++rebuilds_per_group >= copies))
- goto too_many;
+ goto too_many;
}
break;
default:
@@ -433,7 +487,7 @@ too_many:
*
* RAID10-only options:
* [raid10_copies <# copies>] Number of copies. (Default: 2)
- * [raid10_format <near>] Layout algorithm. (Default: near)
+ * [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
*/
static int parse_raid_params(struct raid_set *rs, char **argv,
unsigned num_raid_params)
@@ -520,7 +574,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
return -EINVAL;
}
- if (strcmp("near", argv[i])) {
+ if (strcmp("near", argv[i]) &&
+ strcmp("far", argv[i]) &&
+ strcmp("offset", argv[i])) {
rs->ti->error = "Invalid 'raid10_format' value given";
return -EINVAL;
}
@@ -644,6 +700,15 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
+ /*
+ * If the format is not "near", we only support
+ * two copies at the moment.
+ */
+ if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
+ rs->ti->error = "Too many copies for given RAID10 format.";
+ return -EINVAL;
+ }
+
/* (Len * #mirrors) / #devices */
sectors_per_dev = rs->ti->len * raid10_copies;
sector_div(sectors_per_dev, rs->md.raid_disks);
@@ -854,17 +919,30 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
/*
* Reshaping is not currently allowed
*/
- if ((le32_to_cpu(sb->level) != mddev->level) ||
- (le32_to_cpu(sb->layout) != mddev->layout) ||
- (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
- DMERR("Reshaping arrays not yet supported.");
+ if (le32_to_cpu(sb->level) != mddev->level) {
+ DMERR("Reshaping arrays not yet supported. (RAID level change)");
+ return -EINVAL;
+ }
+ if (le32_to_cpu(sb->layout) != mddev->layout) {
+ DMERR("Reshaping arrays not yet supported. (RAID layout change)");
+ DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
+ DMERR(" Old layout: %s w/ %d copies",
+ raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
+ raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
+ DMERR(" New layout: %s w/ %d copies",
+ raid10_md_layout_to_format(mddev->layout),
+ raid10_md_layout_to_copies(mddev->layout));
+ return -EINVAL;
+ }
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
return -EINVAL;
}
/* We can only change the number of devices in RAID1 right now */
if ((rs->raid_type->level != 1) &&
(le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
- DMERR("Reshaping arrays not yet supported.");
+ DMERR("Reshaping arrays not yet supported. (device count change)");
return -EINVAL;
}
@@ -1329,7 +1407,8 @@ static void raid_status(struct dm_target *ti, status_type_t type,
raid10_md_layout_to_copies(rs->md.layout));
if (rs->print_flags & DMPF_RAID10_FORMAT)
- DMEMIT(" raid10_format near");
+ DMEMIT(" raid10_format %s",
+ raid10_md_layout_to_format(rs->md.layout));
DMEMIT(" %d", rs->md.raid_disks);
for (i = 0; i < rs->md.raid_disks; i++) {
@@ -1418,6 +1497,10 @@ static struct target_type raid_target = {
static int __init dm_raid_init(void)
{
+ DMINFO("Loading target version %u.%u.%u",
+ raid_target.version[0],
+ raid_target.version[1],
+ raid_target.version[2]);
return dm_register_target(&raid_target);
}
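
The helpers added above pack the raid10 geometry into MD's layout word: near copies in the low byte, far copies in the second byte, bit 16 for "offset" and bit 17 for "use_far_sets". A stand-alone decoder sketch (illustrative only, not driver code):

#include <stdio.h>

struct raid10_layout {
	unsigned int near_copies, far_copies;
	int far_offset, use_far_sets;
};

/* Unpack the layout word the same way the helpers above encode it. */
static struct raid10_layout decode_layout(unsigned int layout)
{
	struct raid10_layout g = {
		.near_copies  = layout & 0xFF,
		.far_copies   = (layout >> 8) & 0xFF,
		.far_offset   = !!(layout & 0x10000),
		.use_far_sets = !!(layout & 0x20000),
	};
	return g;
}

int main(void)
{
	struct raid10_layout n2 = decode_layout(0x102);   /* "near", 2 copies */
	struct raid10_layout o2 = decode_layout(0x30201); /* "offset", 2 copies */

	printf("near=%u far=%u offset=%d sets=%d\n",
	       n2.near_copies, n2.far_copies, n2.far_offset, n2.use_far_sets);
	printf("near=%u far=%u offset=%d sets=%d\n",
	       o2.near_copies, o2.far_copies, o2.far_offset, o2.use_far_sets);
	return 0;
}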
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3db3d1b271f..fcb878f8879 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -307,6 +307,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
bio_io_error(bio);
return;
}
+ if (mddev->ro == 1 && unlikely(rw == WRITE)) {
+ bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
+ return;
+ }
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
@@ -2994,6 +2998,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (!sectors)
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
+ if (!my_mddev->pers->resize)
+ /* Cannot change size for RAID0 or Linear etc */
+ return -EINVAL;
}
if (sectors < my_mddev->dev_sectors)
return -EINVAL; /* component must fit device */
@@ -6525,7 +6532,17 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
+ /* mddev_unlock will wake thread */
+ /* If a device failed while we were read-only, we
+ * need to make sure the metadata is updated now.
+ */
+ if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+ mddev_unlock(mddev);
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ mddev_lock(mddev);
+ }
} else {
err = -EROFS;
goto abort_unlock;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 24b359717a7..0505452de8d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -175,7 +175,13 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
rdev1->new_raid_disk = j;
}
- if (j < 0 || j >= mddev->raid_disks) {
+ if (j < 0) {
+ printk(KERN_ERR
+ "md/raid0:%s: remove inactive devices before converting to RAID0\n",
+ mdname(mddev));
+ goto abort;
+ }
+ if (j >= mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
"aborting!\n", mdname(mddev), j);
goto abort;
@@ -289,7 +295,7 @@ abort:
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- *private_conf = NULL;
+ *private_conf = ERR_PTR(err);
return err;
}
@@ -411,7 +417,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
"%s does not support generic reshape\n", __func__);
rdev_for_each(rdev, mddev)
- array_sectors += rdev->sectors;
+ array_sectors += (rdev->sectors &
+ ~(sector_t)(mddev->chunk_sectors-1));
return array_sectors;
}
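
raid0_size() now rounds each member down to a whole number of chunks before summing, so partial trailing chunks no longer inflate the array size; the mask is only valid because chunk_sectors is a power of two. A quick check of that rounding:

#include <stdio.h>

/* Round a device size down to a whole number of chunks before summing,
 * as the raid0_size() change does. chunk_sectors must be a power of two. */
static unsigned long long chunk_align(unsigned long long sectors,
				      unsigned int chunk_sectors)
{
	return sectors & ~(unsigned long long)(chunk_sectors - 1);
}

int main(void)
{
	/* A 1000-sector device with 128-sector chunks contributes 896. */
	printf("%llu\n", chunk_align(1000, 128));
	return 0;
}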
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d5bddfc4010..fd86b372692 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
kfree(plug);
return;
@@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
const unsigned long do_discard = (bio->bi_rw
& (REQ_DISCARD | REQ_SECURE));
+ const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -1301,7 +1303,8 @@ read_again:
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+ mbio->bi_rw =
+ WRITE | do_flush_fua | do_sync | do_discard | do_same;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
@@ -2818,6 +2821,9 @@ static int run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
+ if (mddev->queue)
+ blk_queue_max_write_same_sectors(mddev->queue,
+ mddev->chunk_sectors);
rdev_for_each(rdev, mddev) {
if (!mddev->gendisk)
continue;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 64d48249c03..77b562d18a9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -38,21 +38,36 @@
* near_copies (stored in low byte of layout)
* far_copies (stored in second byte of layout)
* far_offset (stored in bit 16 of layout )
+ * use_far_sets (stored in bit 17 of layout )
*
- * The data to be stored is divided into chunks using chunksize.
- * Each device is divided into far_copies sections.
- * In each section, chunks are laid out in a style similar to raid0, but
- * near_copies copies of each chunk is stored (each on a different drive).
- * The starting device for each section is offset near_copies from the starting
- * device of the previous section.
- * Thus they are (near_copies*far_copies) of each chunk, and each is on a different
- * drive.
- * near_copies and far_copies must be at least one, and their product is at most
- * raid_disks.
+ * The data to be stored is divided into chunks using chunksize. Each device
+ * is divided into far_copies sections. In each section, chunks are laid out
+ * in a style similar to raid0, but near_copies copies of each chunk is stored
+ * (each on a different drive). The starting device for each section is offset
+ * near_copies from the starting device of the previous section. Thus there
+ * are (near_copies * far_copies) of each chunk, and each is on a different
+ * drive. near_copies and far_copies must be at least one, and their product
+ * is at most raid_disks.
*
* If far_offset is true, then the far_copies are handled a bit differently.
- * The copies are still in different stripes, but instead of be very far apart
- * on disk, there are adjacent stripes.
+ * The copies are still in different stripes, but instead of being very far
+ * apart on disk, there are adjacent stripes.
+ *
+ * The far and offset algorithms are handled slightly differently if
+ * 'use_far_sets' is true. In this case, the array's devices are grouped into
+ * sets that are (near_copies * far_copies) in size. The far copied stripes
+ * are still shifted by 'near_copies' devices, but this shifting stays confined
+ * to the set rather than the entire array. This is done to improve the number
+ * of device combinations that can fail without causing the array to fail.
+ * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
+ * on a device):
+ * A B C D A B C D E
+ * ... ...
+ * D A B C E A B C D
+ * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
+ * [A B] [C D] [A B] [C D E]
+ * |...| |...| |...| | ... |
+ * [B A] [D C] [B A] [E C D]
*/
/*
@@ -535,6 +550,13 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
sector_t stripe;
int dev;
int slot = 0;
+ int last_far_set_start, last_far_set_size;
+
+ last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+ last_far_set_start *= geo->far_set_size;
+
+ last_far_set_size = geo->far_set_size;
+ last_far_set_size += (geo->raid_disks % geo->far_set_size);
/* now calculate first sector/dev */
chunk = r10bio->sector >> geo->chunk_shift;
@@ -551,15 +573,25 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
/* and calculate all the others */
for (n = 0; n < geo->near_copies; n++) {
int d = dev;
+ int set;
sector_t s = sector;
- r10bio->devs[slot].addr = sector;
r10bio->devs[slot].devnum = d;
+ r10bio->devs[slot].addr = s;
slot++;
for (f = 1; f < geo->far_copies; f++) {
+ set = d / geo->far_set_size;
d += geo->near_copies;
- if (d >= geo->raid_disks)
- d -= geo->raid_disks;
+
+ if ((geo->raid_disks % geo->far_set_size) &&
+ (d > last_far_set_start)) {
+ d -= last_far_set_start;
+ d %= last_far_set_size;
+ d += last_far_set_start;
+ } else {
+ d %= geo->far_set_size;
+ d += geo->far_set_size * set;
+ }
s += geo->stride;
r10bio->devs[slot].devnum = d;
r10bio->devs[slot].addr = s;
@@ -595,6 +627,20 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
* or recovery, so reshape isn't happening
*/
struct geom *geo = &conf->geo;
+ int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
+ int far_set_size = geo->far_set_size;
+ int last_far_set_start;
+
+ if (geo->raid_disks % geo->far_set_size) {
+ last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+ last_far_set_start *= geo->far_set_size;
+
+ if (dev >= last_far_set_start) {
+ far_set_size = geo->far_set_size;
+ far_set_size += (geo->raid_disks % geo->far_set_size);
+ far_set_start = last_far_set_start;
+ }
+ }
offset = sector & geo->chunk_mask;
if (geo->far_offset) {
@@ -602,13 +648,13 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
chunk = sector >> geo->chunk_shift;
fc = sector_div(chunk, geo->far_copies);
dev -= fc * geo->near_copies;
- if (dev < 0)
- dev += geo->raid_disks;
+ if (dev < far_set_start)
+ dev += far_set_size;
} else {
while (sector >= geo->stride) {
sector -= geo->stride;
- if (dev < geo->near_copies)
- dev += geo->raid_disks - geo->near_copies;
+ if (dev < (geo->near_copies + far_set_start))
+ dev += far_set_size - geo->near_copies;
else
dev -= geo->near_copies;
}
@@ -1073,6 +1119,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
kfree(plug);
return;
@@ -1105,6 +1152,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
const unsigned long do_discard = (bio->bi_rw
& (REQ_DISCARD | REQ_SECURE));
+ const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@@ -1460,7 +1508,8 @@ retry_write:
rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_rw =
+ WRITE | do_sync | do_fua | do_discard | do_same;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1551,8 @@ retry_write:
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_rw =
+ WRITE | do_sync | do_fua | do_discard | do_same;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -3436,7 +3486,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
disks = mddev->raid_disks + mddev->delta_disks;
break;
}
- if (layout >> 17)
+ if (layout >> 18)
return -1;
if (chunk < (PAGE_SIZE >> 9) ||
!is_power_of_2(chunk))
@@ -3448,6 +3498,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
geo->near_copies = nc;
geo->far_copies = fc;
geo->far_offset = fo;
+ geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
geo->chunk_mask = chunk - 1;
geo->chunk_shift = ffz(~chunk);
return nc*fc;
@@ -3569,6 +3620,8 @@ static int run(struct mddev *mddev)
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue,
+ mddev->chunk_sectors);
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->geo.raid_disks % conf->geo.near_copies)
blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
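
With 'use_far_sets', the far copy still advances by near_copies devices, but the wrap-around happens inside a set of (near_copies * far_copies) devices rather than across the whole array, matching the [A B] [C D] example in the comment above. A simplified sketch that ignores the uneven last-set case handled in the hunk (illustration only):

#include <stdio.h>

/* Device holding the far copy of a chunk stored on device d, assuming
 * raid_disks is a multiple of the set size. */
static int far_copy_device(int d, int near_copies, int far_set_size)
{
	int set = d / far_set_size;

	d += near_copies;
	d %= far_set_size;	/* wrap inside the set, not the array */
	return d + set * far_set_size;
}

int main(void)
{
	int d;

	/* 8 disks, near=1, far=2 -> sets of 2 devices: 0<->1, 2<->3, ... */
	for (d = 0; d < 8; d++)
		printf("%d -> %d\n", d, far_copy_device(d, 1, 2));
	return 0;
}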
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 1054cf60234..157d69e83ff 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -33,6 +33,11 @@ struct r10conf {
* far_offset, in which case it is
* 1 stripe.
*/
+ int far_set_size; /* The number of devices in a set,
+ * where a 'set' are devices that
+ * contain far/offset copies of
+ * each other.
+ */
int chunk_shift; /* shift from chunks to sectors */
sector_t chunk_mask;
} prev, geo;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5af2d270908..3ee2912889e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1403,7 +1403,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
&sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
-static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
int overlap_clear = 0, i, disks = sh->disks;
struct dma_async_tx_descriptor *tx = NULL;
@@ -1468,36 +1468,6 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-#ifdef CONFIG_MULTICORE_RAID456
-static void async_run_ops(void *param, async_cookie_t cookie)
-{
- struct stripe_head *sh = param;
- unsigned long ops_request = sh->ops.request;
-
- clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
- wake_up(&sh->ops.wait_for_ops);
-
- __raid_run_ops(sh, ops_request);
- release_stripe(sh);
-}
-
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
-{
- /* since handle_stripe can be called outside of raid5d context
- * we need to ensure sh->ops.request is de-staged before another
- * request arrives
- */
- wait_event(sh->ops.wait_for_ops,
- !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
- sh->ops.request = ops_request;
-
- atomic_inc(&sh->count);
- async_schedule(async_run_ops, sh);
-}
-#else
-#define raid_run_ops __raid_run_ops
-#endif
-
static int grow_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
@@ -1506,9 +1476,6 @@ static int grow_one_stripe(struct r5conf *conf)
return 0;
sh->raid_conf = conf;
- #ifdef CONFIG_MULTICORE_RAID456
- init_waitqueue_head(&sh->ops.wait_for_ops);
- #endif
spin_lock_init(&sh->stripe_lock);
@@ -1627,9 +1594,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
break;
nsh->raid_conf = conf;
- #ifdef CONFIG_MULTICORE_RAID456
- init_waitqueue_head(&nsh->ops.wait_for_ops);
- #endif
spin_lock_init(&nsh->stripe_lock);
list_add(&nsh->lru, &newstripes);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 671f5b171c7..c346941a251 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -858,6 +858,7 @@ config EZX_PCAP
config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
+ select POWER_SUPPLY
select MFD_CORE
select IRQ_DOMAIN
help
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index b1f3561b023..5f341a50ee5 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -594,9 +594,12 @@ static int ab8500_gpadc_runtime_suspend(struct device *dev)
static int ab8500_gpadc_runtime_resume(struct device *dev)
{
struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+ int ret;
- regulator_enable(gpadc->regu);
- return 0;
+ ret = regulator_enable(gpadc->regu);
+ if (ret)
+ dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
+ return ret;
}
static int ab8500_gpadc_runtime_idle(struct device *dev)
@@ -643,7 +646,7 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
}
/* VTVout LDO used to power up ab8500-GPADC */
- gpadc->regu = regulator_get(&pdev->dev, "vddadc");
+ gpadc->regu = devm_regulator_get(&pdev->dev, "vddadc");
if (IS_ERR(gpadc->regu)) {
ret = PTR_ERR(gpadc->regu);
dev_err(gpadc->dev, "failed to get vtvout LDO\n");
@@ -652,7 +655,11 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpadc);
- regulator_enable(gpadc->regu);
+ ret = regulator_enable(gpadc->regu);
+ if (ret) {
+ dev_err(gpadc->dev, "Failed to enable vtvout LDO: %d\n", ret);
+ goto fail_enable;
+ }
pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUDOSUSPEND_DELAY);
pm_runtime_use_autosuspend(gpadc->dev);
@@ -663,6 +670,8 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
list_add_tail(&gpadc->node, &ab8500_gpadc_list);
dev_dbg(gpadc->dev, "probe success\n");
return 0;
+
+fail_enable:
fail_irq:
free_irq(gpadc->irq, gpadc);
fail:
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 6b5edf64de2..4febc5c7fde 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -460,15 +460,15 @@ static void omap_usbhs_init(struct device *dev)
switch (omap->usbhs_rev) {
case OMAP_USBHS_REV1:
- omap_usbhs_rev1_hostconfig(omap, reg);
+ reg = omap_usbhs_rev1_hostconfig(omap, reg);
break;
case OMAP_USBHS_REV2:
- omap_usbhs_rev2_hostconfig(omap, reg);
+ reg = omap_usbhs_rev2_hostconfig(omap, reg);
break;
default: /* newer revisions */
- omap_usbhs_rev2_hostconfig(omap, reg);
+ reg = omap_usbhs_rev2_hostconfig(omap, reg);
break;
}
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index bbdbc50a3cc..73bf76df104 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -257,9 +257,24 @@ static struct regmap_irq_chip palmas_irq_chip = {
PALMAS_INT1_MASK),
};
-static void palmas_dt_to_pdata(struct device_node *node,
+static int palmas_set_pdata_irq_flag(struct i2c_client *i2c,
struct palmas_platform_data *pdata)
{
+ struct irq_data *irq_data = irq_get_irq_data(i2c->irq);
+ if (!irq_data) {
+ dev_err(&i2c->dev, "Invalid IRQ: %d\n", i2c->irq);
+ return -EINVAL;
+ }
+
+ pdata->irq_flags = irqd_get_trigger_type(irq_data);
+ dev_info(&i2c->dev, "Irq flag is 0x%08x\n", pdata->irq_flags);
+ return 0;
+}
+
+static void palmas_dt_to_pdata(struct i2c_client *i2c,
+ struct palmas_platform_data *pdata)
+{
+ struct device_node *node = i2c->dev.of_node;
int ret;
u32 prop;
@@ -283,6 +298,8 @@ static void palmas_dt_to_pdata(struct device_node *node,
pdata->power_ctrl = PALMAS_POWER_CTRL_NSLEEP_MASK |
PALMAS_POWER_CTRL_ENABLE1_MASK |
PALMAS_POWER_CTRL_ENABLE2_MASK;
+ if (i2c->irq)
+ palmas_set_pdata_irq_flag(i2c, pdata);
}
static int palmas_i2c_probe(struct i2c_client *i2c,
@@ -304,7 +321,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
if (!pdata)
return -ENOMEM;
- palmas_dt_to_pdata(node, pdata);
+ palmas_dt_to_pdata(i2c, pdata);
}
if (!pdata)
@@ -344,6 +361,19 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
}
}
+ /* Change interrupt line output polarity */
+ if (pdata->irq_flags & IRQ_TYPE_LEVEL_HIGH)
+ reg = PALMAS_POLARITY_CTRL_INT_POLARITY;
+ else
+ reg = 0;
+ ret = palmas_update_bits(palmas, PALMAS_PU_PD_OD_BASE,
+ PALMAS_POLARITY_CTRL, PALMAS_POLARITY_CTRL_INT_POLARITY,
+ reg);
+ if (ret < 0) {
+ dev_err(palmas->dev, "POLARITY_CTRL update failed: %d\n", ret);
+ goto err;
+ }
+
/* Change IRQ into clear on read mode for efficiency */
slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
@@ -352,7 +382,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c,
regmap_write(palmas->regmap[slave], addr, reg);
ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW, 0, &palmas_irq_chip,
+ IRQF_ONESHOT | pdata->irq_flags, 0, &palmas_irq_chip,
&palmas->irq_data);
if (ret < 0)
goto err;
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 4658b5bdcd8..aeb8e40ab42 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -169,6 +169,7 @@ err:
void tps65912_device_exit(struct tps65912 *tps65912)
{
mfd_remove_devices(tps65912->dev);
+ tps65912_irq_exit(tps65912);
kfree(tps65912);
}
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index e16edca9267..d2ab222138c 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(twl4030_audio_enable_resource);
* Disable the resource.
* The function returns with error or the content of the register
*/
-int twl4030_audio_disable_resource(unsigned id)
+int twl4030_audio_disable_resource(enum twl4030_audio_res id)
{
struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
int val;
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 88ff9dc8330..942b666a2a0 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -800,7 +800,7 @@ static int twl4030_madc_remove(struct platform_device *pdev)
static struct platform_driver twl4030_madc_driver = {
.probe = twl4030_madc_probe,
- .remove = __exit_p(twl4030_madc_remove),
+ .remove = twl4030_madc_remove,
.driver = {
.name = "twl4030_madc",
.owner = THIS_MODULE,
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 6673e578b3e..ce5b75616b4 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -110,6 +110,7 @@ static struct file_system_type ibmasmfs_type = {
.mount = ibmasmfs_mount,
.kill_sb = kill_litter_super,
};
+MODULE_ALIAS_FS("ibmasmfs");
static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
{
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 82c06165d3d..92ab30ab00d 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1238,6 +1238,7 @@ static struct file_system_type mtd_inodefs_type = {
.mount = mtd_inodefs_mount,
.kill_sb = kill_anon_super,
};
+MODULE_ALIAS_FS("mtd_inodefs");
static int __init init_mtdchar(void)
{
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 11d01d67b3f..8b4e96e01d6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (bond->dev_addr_from_first)
+ if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
bond_set_dev_addr(bond->dev, slave_dev);
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1964,7 +1964,6 @@ static int __bond_release_one(struct net_device *bond_dev,
}
block_netpoll_tx();
- call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2066,8 +2065,10 @@ static int __bond_release_one(struct net_device *bond_dev,
write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
- if (bond->slave_cnt == 0)
+ if (bond->slave_cnt == 0) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
+ call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
+ }
bond_compute_features(bond);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 639049d7e92..da5f4397f87 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
} else {
+ /* Omit CRC. */
+ len -= ETH_FCS_LEN;
+
new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
if (new_skb) {
skb_put(new_skb, len);
skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
new_skb->data,
len);
+ skb_checksum_none_assert(skb);
new_skb->protocol =
eth_type_trans(new_skb, bgmac->net_dev);
netif_receive_skb(new_skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecac04a3687..a923bc4d5a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
tsum = ~csum_fold(csum_add((__force __wsum) csum,
csum_partial(t_header, -fix, 0)));
- return bswab16(csum);
+ return bswab16(tsum);
}
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
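
The one-line bnx2x_cmn.c fix above returns the freshly computed checksum (tsum) instead of the stale input value. The underlying operation is plain 16-bit one's-complement arithmetic: fold a 32-bit sum and complement it. A standalone sketch of that folding step (generic code, not the kernel's csum helpers):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement sum down to 16 bits. */
static uint16_t csum_fold32(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Final Internet-checksum value: complement of the folded sum. */
static uint16_t csum_finish(uint32_t sum)
{
        return (uint16_t)~csum_fold32(sum);
}

int main(void)
{
        /* Sum of the 16-bit words 0x4500 and 0x0054, for example. */
        uint32_t sum = 0x4500 + 0x0054;

        printf("checksum = 0x%04x\n", csum_finish(sum));
        return 0;
}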
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 9a674b14b40..edfa67adf2f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+ if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 1663e0b6b5a..77ebae0ac64 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6,
&rx_tx_in_reset);
- if (!rx_tx_in_reset) {
+ if ((!rx_tx_in_reset) &&
+ (params->link_flags &
+ PHY_INITIALIZED)) {
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_config_sfi(phy, params);
bnx2x_warpcore_reset_lane(bp, phy, 0);
@@ -10422,6 +10424,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x0);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant off.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x0);
+ }
}
break;
case LED_MODE_ON:
@@ -10468,6 +10492,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x20);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant on.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x20);
+ }
}
break;
@@ -10532,6 +10578,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
val);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Restore LED4 source to external link,
+ * and re-enable interrupts.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x40);
+ if (params->link_flags &
+ LINK_FLAGS_INT_DISABLED) {
+ bnx2x_link_int_enable(params);
+ params->link_flags &=
+ ~LINK_FLAGS_INT_DISABLED;
+ }
+ }
}
break;
}
@@ -11791,6 +11853,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
phy->media_type = ETH_PHY_KR;
phy->flags |= FLAGS_WC_DUAL_MODE;
phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_FIBRE |
SUPPORTED_Pause |
@@ -12465,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
+ vars->check_kr2_recovery_cnt = 0;
+ params->link_flags = PHY_INITIALIZED;
/* Driver opens NIG-BRB filters */
bnx2x_set_rx_filter(params, 1);
/* Check if link flap can be avoided */
@@ -12629,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params,
struct bnx2x *bp = params->bp;
vars->link_up = 0;
vars->phy_flags = 0;
+ params->link_flags &= ~PHY_INITIALIZED;
if (!params->lfa_base)
return bnx2x_link_reset(params, vars, 1);
/*
@@ -13349,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
bnx2x_update_link_attr(params, vars->link_attr_sync);
+ vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
/* Restart AN on leading lane */
bnx2x_warpcore_restart_AN_KR(phy, params);
}
@@ -13377,6 +13445,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
return;
}
+ /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
+ * since some switches tend to reinit the AN process and clear the
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * and recovered many times
+ */
+ if (vars->check_kr2_recovery_cnt > 0) {
+ vars->check_kr2_recovery_cnt--;
+ return;
+ }
lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
@@ -13437,7 +13514,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
- (phy->speed_cap_mask & SPEED_20000))
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
bnx2x_check_kr2_wa(params, vars, phy);
bnx2x_check_over_curr(params, vars);
if (vars->rx_tx_asic_rst)
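
The bnx2x_link.c hunks above introduce check_kr2_recovery_cnt: disabling KR2 arms a countdown of CHECK_KR2_RECOVERY_CNT periodic-task ticks (about five seconds), and the work-around simply decrements and returns until it expires, so KR2 is not re-enabled while the link partner is still restarting autonegotiation. A standalone sketch of that debounce pattern (invented kr2_state/kr2_periodic names, not the driver's state machine):

#include <stdio.h>

#define RECOVERY_TICKS 5        /* periodic task runs roughly once a second */

struct kr2_state {
        int enabled;
        int recovery_cnt;       /* ticks to wait before trying recovery */
};

static void kr2_disable(struct kr2_state *s)
{
        s->enabled = 0;
        s->recovery_cnt = RECOVERY_TICKS;       /* arm the debounce */
}

static void kr2_periodic(struct kr2_state *s, int partner_ok)
{
        if (s->enabled)
                return;
        /* Hold off recovery until the countdown expires. */
        if (s->recovery_cnt > 0) {
                s->recovery_cnt--;
                return;
        }
        if (partner_ok) {
                s->enabled = 1;
                printf("KR2 re-enabled\n");
        }
}

int main(void)
{
        struct kr2_state s = { .enabled = 1 };
        int tick;

        kr2_disable(&s);
        for (tick = 0; tick < 7; tick++)
                kr2_periodic(&s, 1);    /* re-enabled only on the sixth tick */
        return 0;
}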
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d25c7d79787..56c2aae4e2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -307,7 +307,9 @@ struct link_params {
struct bnx2x *bp;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
- u16 rsrv1;
+ u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED (1<<0)
+#define PHY_INITIALIZED (1<<1)
u32 lfa_base;
};
@@ -341,7 +343,8 @@ struct link_vars {
u32 link_status;
u32 eee_status;
u8 fault_detected;
- u8 rsrv1;
+ u8 check_kr2_recovery_cnt;
+#define CHECK_KR2_RECOVERY_CNT 5
u16 periodic_flags;
#define PERIODIC_FLAGS_LINK_EVENT 0x0001
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index fdb9b565541..93729f94235 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp)
tg3_ump_link_report(tp);
}
+
+ tp->link_up = netif_carrier_ok(tp->dev);
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
@@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
return err;
}
-static void tg3_carrier_on(struct tg3 *tp)
-{
- netif_carrier_on(tp->dev);
- tp->link_up = true;
-}
-
static void tg3_carrier_off(struct tg3 *tp)
{
netif_carrier_off(tp->dev);
@@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp)
return -EBUSY;
if (netif_running(tp->dev) && tp->link_up) {
- tg3_carrier_off(tp);
+ netif_carrier_off(tp->dev);
tg3_link_report(tp);
}
@@ -4262,9 +4258,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
{
if (curr_link_up != tp->link_up) {
if (curr_link_up) {
- tg3_carrier_on(tp);
+ netif_carrier_on(tp->dev);
} else {
- tg3_carrier_off(tp);
+ netif_carrier_off(tp->dev);
if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 28ceb841418..29aff55f2ee 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -349,6 +349,7 @@ struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
+ u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 071aea79d21..3c9b4f12e3e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
return 0;
}
-static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
+static u16 be_POST_stage_get(struct be_adapter *adapter)
{
u32 sem;
- u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
- SLIPORT_SEMAPHORE_OFFSET_BE;
- pci_read_config_dword(adapter->pdev, reg, &sem);
- *stage = sem & POST_STAGE_MASK;
-
- if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
- return -1;
+ if (BEx_chip(adapter))
+ sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
else
- return 0;
+ pci_read_config_dword(adapter->pdev,
+ SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+
+ return sem & POST_STAGE_MASK;
}
int lancer_wait_ready(struct be_adapter *adapter)
@@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
}
do {
- status = be_POST_stage_get(adapter, &stage);
- if (status) {
- dev_err(dev, "POST error; stage=0x%x\n", stage);
- return -1;
- } else if (stage != POST_STAGE_ARMFW_RDY) {
- if (msleep_interruptible(2000)) {
- dev_err(dev, "Waiting for POST aborted\n");
- return -EINTR;
- }
- timeout += 2;
- } else {
+ stage = be_POST_stage_get(adapter);
+ if (stage == POST_STAGE_ARMFW_RDY)
return 0;
+
+ dev_info(dev, "Waiting for POST, %ds elapsed\n",
+ timeout);
+ if (msleep_interruptible(2000)) {
+ dev_err(dev, "Waiting for POST aborted\n");
+ return -EINTR;
}
+ timeout += 2;
} while (timeout < 60);
dev_err(dev, "POST timeout; stage=0x%x\n", stage);
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 541d4530d5b..62dc220695f 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -32,8 +32,8 @@
#define MPU_EP_CONTROL 0
/********** MPU semaphore: used for SH & BE *************/
-#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
-#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
+#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
#define POST_STAGE_MASK 0x0000FFFF
#define POST_ERR_MASK 0x1
#define POST_ERR_SHIFT 31
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3860888ac71..08e54f3d288 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
+ if (adapter->csr)
+ pci_iounmap(adapter->pdev, adapter->csr);
if (adapter->db)
pci_iounmap(adapter->pdev, adapter->db);
}
@@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
SLI_INTF_IF_TYPE_SHIFT;
+ if (BEx_chip(adapter) && be_physfn(adapter)) {
+ adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+ if (adapter->csr == NULL)
+ return -ENOMEM;
+ }
+
addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
if (addr == NULL)
goto pci_map_err;
@@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
+ dev_info(&adapter->pdev->dev,
+ "Waiting for FW to be ready after EEH reset\n");
status = be_fw_wait_ready(adapter);
if (status)
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141..069a155d16e 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct bufdesc *bdp;
void *bufaddr;
unsigned short status;
- unsigned long flags;
+ unsigned int index;
if (!fep->link) {
/* Link is down or autonegotiation is in progress. */
return NETDEV_TX_BUSY;
}
- spin_lock_irqsave(&fep->hw_lock, flags);
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* This should not happen, since ndev->tbusy should be set.
*/
printk("%s: tx queue full!.\n", ndev->name);
- spin_unlock_irqrestore(&fep->hw_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* 4-byte boundaries. Use bounce buffers to copy data
* and get it aligned. Ugh.
*/
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
+
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
- unsigned int index;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
swap_buffer(bufaddr, skb->len);
/* Save skb pointer */
- fep->tx_skbuff[fep->skb_cur] = skb;
-
- ndev->stats.tx_bytes += skb->len;
- fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+ fep->tx_skbuff[index] = skb;
/* Push the data cache so the CPM does not get stale memory
* data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
}
- /* Trigger transmission start */
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
/* If this was the last BD in the ring, start at the beginning again. */
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- if (bdp == fep->dirty_tx) {
- fep->tx_full = 1;
+ fep->cur_tx = bdp;
+
+ if (fep->cur_tx == fep->dirty_tx)
netif_stop_queue(ndev);
- }
- fep->cur_tx = bdp;
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
skb_tx_timestamp(skb);
- spin_unlock_irqrestore(&fep->hw_lock, flags);
-
return NETDEV_TX_OK;
}
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
- /* Reset SKB transmit buffers. */
- fep->skb_cur = fep->skb_dirty = 0;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
+ int index = 0;
fep = netdev_priv(ndev);
- spin_lock(&fep->hw_lock);
bdp = fep->dirty_tx;
+ /* get next bdp of dirty_tx */
+ if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+ bdp = fep->tx_bd_base;
+ else
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
- if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+ /* current queue is empty */
+ if (bdp == fep->cur_tx)
break;
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
+
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
- skb = fep->tx_skbuff[fep->skb_dirty];
+ skb = fep->tx_skbuff[index];
+
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
- fep->tx_skbuff[fep->skb_dirty] = NULL;
- fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+ fep->tx_skbuff[index] = NULL;
+
+ fep->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (fep->tx_full) {
- fep->tx_full = 0;
+ if (fep->dirty_tx != fep->cur_tx) {
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
}
}
- fep->dirty_tx = bdp;
- spin_unlock(&fep->hw_lock);
+ return;
}
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
int_events = readl(fep->hwp + FEC_IEVENT);
writel(int_events, fep->hwp + FEC_IEVENT);
- if (int_events & FEC_ENET_RXF) {
+ if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
ret = IRQ_HANDLED;
/* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
}
}
- /* Transmit OK, or non-fatal error. Update the buffer
- * descriptors. FEC handles all errors, we just discover
- * them as part of the transmit process.
- */
- if (int_events & FEC_ENET_TXF) {
- ret = IRQ_HANDLED;
- fec_enet_tx(ndev);
- }
-
if (int_events & FEC_ENET_MII) {
ret = IRQ_HANDLED;
complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
int pkts = fec_enet_rx(ndev, budget);
struct fec_enet_private *fep = netdev_priv(ndev);
+ fec_enet_tx(ndev);
+
if (pkts < budget) {
napi_complete(napi);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
for (i = 0; i < TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
+ fep->dirty_tx = bdp;
fec_restart(ndev, 0);
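
A recurring pattern in the fec.c rework above is deriving the tx_skbuff[] slot straight from the buffer-descriptor pointer instead of keeping separate skb_cur/skb_dirty counters: index = bdp - tx_bd_base, with the subtraction done on the extended-descriptor type when bufdesc_ex is set so the element size is right. A standalone sketch of that index computation (simplified structs, not the driver's exact layout):

#include <stdio.h>

struct bufdesc {
        unsigned short cbd_sc;
        unsigned short cbd_datlen;
        unsigned long cbd_bufaddr;
};

struct bufdesc_ex {
        struct bufdesc desc;
        unsigned long cbd_esc;
        unsigned long extra[3]; /* stand-in for the extra descriptor words */
};

/* Ring index of bdp: pointer difference in units of the real element size. */
static unsigned int bd_index(const void *bdp, const void *base, int is_ex)
{
        if (is_ex)
                return (const struct bufdesc_ex *)bdp -
                       (const struct bufdesc_ex *)base;
        return (const struct bufdesc *)bdp - (const struct bufdesc *)base;
}

int main(void)
{
        struct bufdesc_ex ring[8];

        printf("index = %u\n", bd_index(&ring[3], &ring[0], 1));        /* 3 */
        return 0;
}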
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 01579b8e37c..f5390071efd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -97,6 +97,13 @@ struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
};
+#else
+struct bufdesc {
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned short cbd_datlen; /* Data length */
+ unsigned long cbd_bufaddr; /* Buffer address */
+};
+#endif
struct bufdesc_ex {
struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
unsigned short res0[4];
};
-#else
-struct bufdesc {
- unsigned short cbd_sc; /* Control and status info */
- unsigned short cbd_datlen; /* Data length */
- unsigned long cbd_bufaddr; /* Buffer address */
-};
-#endif
-
/*
* The following definitions courtesy of commproc.h, which were
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
struct sk_buff *rx_skbuff[RX_RING_SIZE];
- ushort skb_cur;
- ushort skb_dirty;
/* CPM dual port RAM relative addresses */
dma_addr_t bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
- uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 2c1813737f6..f91a8f3f9d4 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/mdio.h>
+#include <linux/pm_runtime.h>
#include "e1000.h"
@@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+static int e1000e_ethtool_begin(struct net_device *netdev)
+{
+ return pm_runtime_get_sync(netdev->dev.parent);
+}
+
+static void e1000e_ethtool_complete(struct net_device *netdev)
+{
+ pm_runtime_put_sync(netdev->dev.parent);
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
+ .begin = e1000e_ethtool_begin,
+ .complete = e1000e_ethtool_complete,
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
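
The ethtool .begin/.complete hooks added above bracket every ethtool operation with a runtime-PM reference, so the device is resumed for the duration of the request and allowed to suspend again afterwards. A standalone sketch of that get/put bracketing (a toy usage counter, not the kernel's pm_runtime API):

#include <stdio.h>

struct toy_dev {
        int usage;              /* runtime-PM style usage counter */
        int powered;
};

static int dev_get(struct toy_dev *d)  /* plays the role of .begin */
{
        if (d->usage++ == 0) {
                d->powered = 1;
                printf("device resumed\n");
        }
        return 0;
}

static void dev_put(struct toy_dev *d) /* plays the role of .complete */
{
        if (--d->usage == 0) {
                d->powered = 0;
                printf("device may suspend again\n");
        }
}

static void ethtool_op(struct toy_dev *d)
{
        dev_get(d);
        printf("run ethtool operation, powered=%d\n", d->powered);
        dev_put(d);
}

int main(void)
{
        struct toy_dev d = { 0, 0 };

        ethtool_op(&d);
        return 0;
}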
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index dff7bff8b8e..121a865c7fb 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -782,6 +782,59 @@ release:
}
/**
+ * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
+ * preventing further DMA write requests. Workaround the issue by disabling
+ * the de-assertion of the clock request when in 1Gbps mode.
+ **/
+static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+ u32 fextnvm6 = er32(FEXTNVM6);
+ s32 ret_val = 0;
+
+ if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
+ u16 kmrn_reg;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &kmrn_reg);
+ if (ret_val)
+ goto release;
+
+ ret_val =
+ e1000e_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg &
+ ~E1000_KMRNCTRLSTA_K1_ENABLE);
+ if (ret_val)
+ goto release;
+
+ usleep_range(10, 20);
+
+ ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+ ret_val =
+ e1000e_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg);
+release:
+ hw->phy.ops.release(hw);
+ } else {
+ /* clear FEXTNVM6 bit 8 on link down or 10/100 */
+ ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ }
+
+ return ret_val;
+}
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+ /* Work-around I218 hang issue */
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
@@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
phy_ctrl = er32(PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
if (hw->phy.type == e1000_phy_i217) {
- u16 phy_reg;
+ u16 phy_reg, device_id = hw->adapter->pdev->device;
+
+ if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ u32 fextnvm6 = er32(FEXTNVM6);
+
+ ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index b6d3174d7d2..8bf4655c2e1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -92,6 +92,8 @@
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a177b8b65c4..948b86ffa4f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev)
netif_start_queue(netdev);
adapter->idle_check = true;
+ hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);
/* fire a link status change interrupt to start the watchdog */
@@ -4662,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
+ pm_runtime_get_sync(&adapter->pdev->dev);
ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4672,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
+ pm_runtime_put_sync(&adapter->pdev->dev);
} else {
/* Do not read PHY registers if link is not up
* Set values to typical power-on defaults
@@ -5887,8 +5890,7 @@ release:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
- bool runtime)
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5912,10 +5914,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
e1000e_reset_interrupt_capability(adapter);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5971,13 +5969,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
ew32(WUFC, 0);
}
- *enable_wake = !!wufc;
-
- /* make sure adapter isn't asleep if manageability is enabled */
- if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
- (hw->mac.ops.check_mng_mode(hw)))
- *enable_wake = true;
-
if (adapter->hw.phy.type == e1000_phy_igp_3)
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
@@ -5986,27 +5977,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
*/
e1000e_release_hw_control(adapter);
- pci_disable_device(pdev);
-
- return 0;
-}
-
-static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
-{
- if (sleep && wake) {
- pci_prepare_to_sleep(pdev);
- return;
- }
-
- pci_wake_from_d3(pdev, wake);
- pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
- bool wake)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
+ pci_clear_master(pdev);
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
@@ -6021,12 +5992,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
(devctl & ~PCI_EXP_DEVCTL_CERE));
- e1000_power_off(pdev, sleep, wake);
+ pci_save_state(pdev);
+ pci_prepare_to_sleep(pdev);
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
- } else {
- e1000_power_off(pdev, sleep, wake);
}
+
+ return 0;
}
#ifdef CONFIG_PCIEASPM
@@ -6084,9 +6056,7 @@ static int __e1000_resume(struct pci_dev *pdev)
if (aspm_disable_flag)
e1000e_disable_aspm(pdev, aspm_disable_flag);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_save_state(pdev);
+ pci_set_master(pdev);
e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
@@ -6152,14 +6122,8 @@ static int __e1000_resume(struct pci_dev *pdev)
static int e1000_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake, false);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);
- return retval;
+ return __e1000_shutdown(pdev, false);
}
static int e1000_resume(struct device *dev)
@@ -6182,13 +6146,10 @@ static int e1000_runtime_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (e1000e_pm_ready(adapter)) {
- bool wake;
-
- __e1000_shutdown(pdev, &wake, true);
- }
+ if (!e1000e_pm_ready(adapter))
+ return 0;
- return 0;
+ return __e1000_shutdown(pdev, true);
}
static int e1000_idle(struct device *dev)
@@ -6226,12 +6187,7 @@ static int e1000_runtime_resume(struct device *dev)
static void e1000_shutdown(struct pci_dev *pdev)
{
- bool wake = false;
-
- __e1000_shutdown(pdev, &wake, false);
-
- if (system_state == SYSTEM_POWER_OFF)
- e1000_complete_shutdown(pdev, false, wake);
+ __e1000_shutdown(pdev, false);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6352,9 +6308,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
- pci_set_master(pdev);
pdev->state_saved = true;
pci_restore_state(pdev);
+ pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6783,7 +6739,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
+ (hw->mac.ops.check_mng_mode(hw)))
+ device_wakeup_enable(&pdev->dev);
/* save off EEPROM version number */
e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 794fe149766..a7e6a3e3725 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -42,6 +42,7 @@
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 84e7e0909de..b64542acfa3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.type) {
case e1000_phy_i210:
case e1000_phy_m88:
- if (hw->phy.id == I347AT4_E_PHY_ID ||
- hw->phy.id == M88E1112_E_PHY_ID)
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
- else
+ break;
+ default:
ret_val = igb_copper_link_setup_m88(hw);
+ break;
+ }
break;
case e1000_phy_igp_3:
ret_val = igb_copper_link_setup_igp(hw);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d27edbc6392..25151401c2a 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -447,7 +447,7 @@ struct igb_adapter {
#endif
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
- struct igb_i2c_client_list *i2c_clients;
+ struct i2c_client *i2c_client;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 0a9b073d0b0..4623502054d 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -39,6 +39,10 @@
#include <linux/pci.h>
#ifdef CONFIG_IGB_HWMON
+struct i2c_board_info i350_sensor_info = {
+ I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
+};
+
/* hwmon callback functions */
static ssize_t igb_hwmon_show_location(struct device *dev,
struct device_attribute *attr,
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
unsigned int i;
int n_attrs;
int rc = 0;
+ struct i2c_client *client = NULL;
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
if (rc)
goto exit;
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device..\n");
+ goto exit;
+ }
+ adapter->i2c_client = client;
+
/* Allocation space for max attributes
* max num sensors * values (loc, temp, max, caution)
*/
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ed79a1c53b5..4dbd62968c7 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
return;
}
-static const struct i2c_board_info i350_sensor_info = {
- I2C_BOARD_INFO("i350bb", 0Xf8),
-};
-
/* igb_init_i2c - Init I2C interface
* @adapter: pointer to adapter structure
*
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
/* If we spanned a buffer we have a huge mess so test for it */
BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
- /* Guarantee this function can be used by verifying buffer sizes */
- BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
- NET_IP_ALIGN +
- IGB_TS_HDR_LEN +
- ETH_FRAME_LEN +
- ETH_FCS_LEN));
-
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
}
}
-static DEFINE_SPINLOCK(i2c_clients_lock);
-
-/* igb_get_i2c_client - returns matching client
- * in adapters's client list.
- * @adapter: adapter struct
- * @dev_addr: device address of i2c needed.
- */
-static struct i2c_client *
-igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
-{
- ulong flags;
- struct igb_i2c_client_list *client_list;
- struct i2c_client *client = NULL;
- struct i2c_board_info client_info = {
- I2C_BOARD_INFO("igb", 0x00),
- };
-
- spin_lock_irqsave(&i2c_clients_lock, flags);
- client_list = adapter->i2c_clients;
-
- /* See if we already have an i2c_client */
- while (client_list) {
- if (client_list->client->addr == (dev_addr >> 1)) {
- client = client_list->client;
- goto exit;
- } else {
- client_list = client_list->next;
- }
- }
-
- /* no client_list found, create a new one */
- client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
- if (client_list == NULL)
- goto exit;
-
- /* dev_addr passed to us is left-shifted by 1 bit
- * i2c_new_device call expects it to be flush to the right.
- */
- client_info.addr = dev_addr >> 1;
- client_info.platform_data = adapter;
- client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
- if (client_list->client == NULL) {
- dev_info(&adapter->pdev->dev,
- "Failed to create new i2c device..\n");
- goto err_no_client;
- }
-
- /* insert new client at head of list */
- client_list->next = adapter->i2c_clients;
- adapter->i2c_clients = client_list;
-
- client = client_list->client;
- goto exit;
-
-err_no_client:
- kfree(client_list);
-exit:
- spin_unlock_irqrestore(&i2c_clients_lock, flags);
- return client;
-}
-
/* igb_read_i2c_byte - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
- struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ struct i2c_client *this_client = adapter->i2c_client;
s32 status;
u16 swfw_mask = 0;
@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
- struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ struct i2c_client *this_client = adapter->i2c_client;
s32 status;
u16 swfw_mask = E1000_SWFW_PHY0_SM;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 29140502b71..6562c736a1d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
/* mii management interface *************************************************/
+static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
+{
+ u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+ u32 autoneg_disable = FORCE_LINK_PASS |
+ DISABLE_AUTO_NEG_SPEED_GMII |
+ DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+ DISABLE_AUTO_NEG_FOR_DUPLEX;
+
+ if (mp->phy->autoneg == AUTONEG_ENABLE) {
+ /* enable auto negotiation */
+ pscr &= ~autoneg_disable;
+ goto out_write;
+ }
+
+ pscr |= autoneg_disable;
+
+ if (mp->phy->speed == SPEED_1000) {
+ /* force gigabit, half duplex not supported */
+ pscr |= SET_GMII_SPEED_TO_1000;
+ pscr |= SET_FULL_DUPLEX_MODE;
+ goto out_write;
+ }
+
+ pscr &= ~SET_GMII_SPEED_TO_1000;
+
+ if (mp->phy->speed == SPEED_100)
+ pscr |= SET_MII_SPEED_TO_100;
+ else
+ pscr &= ~SET_MII_SPEED_TO_100;
+
+ if (mp->phy->duplex == DUPLEX_FULL)
+ pscr |= SET_FULL_DUPLEX_MODE;
+ else
+ pscr &= ~SET_FULL_DUPLEX_MODE;