-rw-r--r--  Documentation/bcache.txt | 12
-rw-r--r--  Documentation/devices.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt | 2
-rw-r--r--  Documentation/dmatest.txt | 6
-rw-r--r--  Documentation/kernel-parameters.txt | 3
-rw-r--r--  Documentation/m68k/kernel-options.txt | 2
-rw-r--r--  MAINTAINERS | 19
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/debug.S | 28
-rw-r--r--  arch/arm/boot/compressed/head-sa1100.S | 1
-rw-r--r--  arch/arm/boot/compressed/head-shark.S | 1
-rw-r--r--  arch/arm/boot/compressed/head.S | 5
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/armada-xp-gp.dts | 5
-rw-r--r--  arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts | 5
-rw-r--r--  arch/arm/boot/dts/bcm2835.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx25.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/imx27.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/imx51.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap4-panda-common.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/omap4-sdp.dts | 20
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi | 3
-rw-r--r--  arch/arm/include/asm/percpu.h | 11
-rw-r--r--  arch/arm/kernel/topology.c | 2
-rw-r--r--  arch/arm/mach-exynos/common.c | 2
-rw-r--r--  arch/arm/mach-imx/clk-imx6q.c | 4
-rw-r--r--  arch/arm/mach-kirkwood/board-ts219.c | 10
-rw-r--r--  arch/arm/mach-kirkwood/mpp.c | 5
-rw-r--r--  arch/arm/mach-mvebu/coherency_ll.S | 16
-rw-r--r--  arch/arm/mach-omap2/clock36xx.c | 18
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_33xx_data.c | 9
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 6
-rw-r--r--  arch/arm/mach-prima2/pm.c | 6
-rw-r--r--  arch/arm/mach-prima2/rstc.c | 6
-rw-r--r--  arch/arm/mach-shmobile/setup-sh73a0.c | 2
-rw-r--r--  arch/arm/mach-ux500/board-mop500-regulators.c | 3
-rw-r--r--  arch/arm/mach-ux500/cpuidle.c | 4
-rw-r--r--  arch/arm/plat-samsung/include/plat/uncompress.h | 10
-rw-r--r--  arch/arm/plat-samsung/pm.c | 18
-rw-r--r--  arch/m68k/include/asm/gpio.h | 3
-rw-r--r--  arch/mips/cavium-octeon/setup.c | 15
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 2
-rw-r--r--  arch/mips/include/asm/ptrace.h | 32
-rw-r--r--  arch/mips/include/uapi/asm/kvm.h | 81
-rw-r--r--  arch/mips/include/uapi/asm/ptrace.h | 17
-rw-r--r--  arch/mips/kernel/binfmt_elfn32.c | 11
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 11
-rw-r--r--  arch/mips/kernel/ftrace.c | 4
-rw-r--r--  arch/mips/kernel/idle.c | 13
-rw-r--r--  arch/mips/kernel/rtlx.c | 1
-rw-r--r--  arch/mips/kernel/traps.c | 28
-rw-r--r--  arch/mips/kvm/kvm_mips.c | 83
-rw-r--r--  arch/mips/mm/tlbex.c | 4
-rw-r--r--  arch/mips/ralink/of.c | 2
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 17
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 16
-rw-r--r--  arch/powerpc/kernel/cputable.c | 8
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 28
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 92
-rw-r--r--  arch/powerpc/kernel/irq.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 4
-rw-r--r--  arch/powerpc/kernel/process.c | 7
-rw-r--r--  arch/powerpc/kernel/traps.c | 10
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 5
-rw-r--r--  arch/powerpc/kvm/booke.c | 18
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c | 5
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 2
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 12
-rw-r--r--  arch/s390/include/asm/pgtable.h | 32
-rw-r--r--  arch/s390/kernel/dumpstack.c | 12
-rw-r--r--  arch/s390/kernel/irq.c | 64
-rw-r--r--  arch/s390/kernel/sclp.S | 2
-rw-r--r--  arch/s390/pci/pci.c | 33
-rw-r--r--  arch/sparc/kernel/prom_common.c | 5
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 47
-rw-r--r--  arch/x86/include/asm/efi.h | 7
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h | 1
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S | 2
-rw-r--r--  arch/x86/mm/init.c | 6
-rw-r--r--  arch/x86/platform/efi/efi.c | 188
-rw-r--r--  arch/x86/tools/relocs.c | 4
-rw-r--r--  arch/x86/xen/smp.c | 8
-rw-r--r--  block/blk-core.c | 2
-rw-r--r--  crypto/Kconfig | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 7
-rw-r--r--  drivers/acpi/device_pm.c | 10
-rw-r--r--  drivers/acpi/scan.c | 5
-rw-r--r--  drivers/acpi/video.c | 19
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 6
-rw-r--r--  drivers/base/regmap/regcache.c | 20
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 5
-rw-r--r--  drivers/block/cciss.c | 32
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 8
-rw-r--r--  drivers/block/nvme-core.c | 62
-rw-r--r--  drivers/block/nvme-scsi.c | 3
-rw-r--r--  drivers/block/pktcdvd.c | 3
-rw-r--r--  drivers/block/rbd.c | 33
-rw-r--r--  drivers/bluetooth/Kconfig | 4
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 9
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 28
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 5
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 3
-rw-r--r--  drivers/crypto/sahara.c | 2
-rw-r--r--  drivers/dma/dmatest.c | 45
-rw-r--r--  drivers/dma/ste_dma40.c | 8
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 30
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 26
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 10
-rw-r--r--  drivers/gpu/drm/tilcdc/Kconfig | 1
-rw-r--r--  drivers/hid/hid-multitouch.c | 11
-rw-r--r--  drivers/hwmon/adm1021.c | 58
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 16
-rw-r--r--  drivers/irqchip/irq-mxs.c | 14
-rw-r--r--  drivers/irqchip/irq-versatile-fpga.c | 2
-rw-r--r--  drivers/irqchip/irq-vic.c | 2
-rw-r--r--  drivers/md/bcache/Kconfig | 1
-rw-r--r--  drivers/md/bcache/bcache.h | 2
-rw-r--r--  drivers/md/bcache/stats.c | 34
-rw-r--r--  drivers/md/bcache/super.c | 185
-rw-r--r--  drivers/md/bcache/writeback.c | 2
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/raid1.c | 38
-rw-r--r--  drivers/md/raid10.c | 29
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/misc/mei/init.c | 4
-rw-r--r--  drivers/misc/mei/nfc.c | 2
-rw-r--r--  drivers/misc/mei/pci-me.c | 1
-rw-r--r--  drivers/misc/sgi-gru/grufile.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 19
-rw-r--r--  drivers/net/bonding/bonding.h | 2
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 5
-rw-r--r--  drivers/net/ethernet/atheros/Kconfig | 18
-rw-r--r--  drivers/net/ethernet/atheros/Makefile | 1
-rw-r--r--  drivers/net/ethernet/atheros/alx/Makefile | 3
-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h | 114
-rw-r--r--  drivers/net/ethernet/atheros/alx/ethtool.c | 272
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.c | 1226
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.h | 499
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 1625
-rw-r--r--  drivers/net/ethernet/atheros/alx/reg.h | 810
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 10
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/interrupt.c | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 14
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/macvlan.c | 20
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/team/team_mode_random.c | 2
-rw-r--r--  drivers/net/team/team_mode_roundrobin.c | 2
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/cdc_ether.c | 6
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vxlan.c | 40
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.h | 2
-rw-r--r--  drivers/net/wireless/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 17
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-rs.c | 1
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/common.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rxon.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/debugfs.c | 22
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 29
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 134
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.h | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 4
-rw-r--r--  drivers/net/wireless/ti/wl12xx/scan.c | 2
-rw-r--r--  drivers/net/wireless/ti/wl12xx/wl12xx.h | 6
-rw-r--r--  drivers/net/wireless/ti/wl18xx/scan.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 8
-rw-r--r--  drivers/of/base.c | 15
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7779.c | 45
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 2
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 131
-rw-r--r--  drivers/rtc/rtc-cmos.c | 4
-rw-r--r--  drivers/rtc/rtc-tps6586x.c | 3
-rw-r--r--  drivers/rtc/rtc-twl.c | 1
-rw-r--r--  drivers/s390/net/netiucv.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_debugfs.c | 2
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 2
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 2
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 3
-rw-r--r--  drivers/spi/spi-xilinx.c | 74
-rw-r--r--  drivers/usb/chipidea/core.c | 3
-rw-r--r--  drivers/usb/chipidea/udc.c | 13
-rw-r--r--  drivers/usb/serial/f81232.c | 8
-rw-r--r--  drivers/usb/serial/pl2303.c | 10
-rw-r--r--  drivers/usb/serial/spcp8x5.c | 10
-rw-r--r--  drivers/vfio/vfio.c | 2
-rw-r--r--  drivers/vhost/net.c | 29
-rw-r--r--  drivers/vhost/vhost.c | 8
-rw-r--r--  drivers/vhost/vhost.h | 1
-rw-r--r--  drivers/xen/tmem.c | 4
-rw-r--r--  fs/aio.c | 36
-rw-r--r--  fs/btrfs/disk-io.c | 10
-rw-r--r--  fs/btrfs/inode.c | 3
-rw-r--r--  fs/btrfs/relocation.c | 9
-rw-r--r--  fs/ceph/locks.c | 73
-rw-r--r--  fs/ceph/mds_client.c | 65
-rw-r--r--  fs/ceph/super.h | 9
-rw-r--r--  fs/cifs/connect.c | 4
-rw-r--r--  fs/ecryptfs/file.c | 6
-rw-r--r--  fs/file_table.c | 19
-rw-r--r--  fs/hpfs/file.c | 4
-rw-r--r--  fs/namei.c | 4
-rw-r--r--  fs/ncpfs/dir.c | 9
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 1
-rw-r--r--  fs/ocfs2/namei.c | 4
-rw-r--r--  fs/proc/base.c | 1
-rw-r--r--  fs/proc/kmsg.c | 10
-rw-r--r--  fs/xfs/xfs_attr_leaf.h | 1
-rw-r--r--  fs/xfs/xfs_btree.c | 10
-rw-r--r--  fs/xfs/xfs_dir2_format.h | 5
-rw-r--r--  fs/xfs/xfs_log_recover.c | 19
-rw-r--r--  fs/xfs/xfs_mount.c | 18
-rw-r--r--  include/asm-generic/kvm_para.h | 5
-rw-r--r--  include/linux/cpu.h | 4
-rw-r--r--  include/linux/filter.h | 1
-rw-r--r--  include/linux/if_team.h | 4
-rw-r--r--  include/linux/math64.h | 6
-rw-r--r--  include/linux/rculist.h | 20
-rw-r--r--  include/linux/rcupdate.h | 9
-rw-r--r--  include/linux/scatterlist.h | 3
-rw-r--r--  include/linux/smp.h | 19
-rw-r--r--  include/linux/swapops.h | 3
-rw-r--r--  include/linux/syslog.h | 4
-rw-r--r--  include/linux/tracepoint.h | 4
-rw-r--r--  include/net/bluetooth/hci_core.h | 1
-rw-r--r--  include/net/bluetooth/mgmt.h | 1
-rw-r--r--  include/net/ip_tunnels.h | 6
-rw-r--r--  include/sound/soc-dapm.h | 3
-rw-r--r--  include/uapi/linux/kvm.h | 1
-rw-r--r--  init/Kconfig | 1
-rw-r--r--  kernel/audit.c | 2
-rw-r--r--  kernel/audit_tree.c | 1
-rw-r--r--  kernel/cpu.c | 55
-rw-r--r--  kernel/exit.c | 2
-rw-r--r--  kernel/irq/irqdomain.c | 9
-rw-r--r--  kernel/printk.c | 91
-rw-r--r--  kernel/rcutree.c | 21
-rw-r--r--  kernel/rcutree.h | 2
-rw-r--r--  kernel/softirq.c | 13
-rw-r--r--  kernel/sys.c | 29
-rw-r--r--  kernel/time/ntp.c | 1
-rw-r--r--  kernel/time/tick-broadcast.c | 8
-rw-r--r--  kernel/time/timekeeping.c | 8
-rw-r--r--  kernel/trace/ftrace.c | 18
-rw-r--r--  kernel/trace/trace.c | 18
-rw-r--r--  kernel/trace/trace.h | 2
-rw-r--r--  kernel/trace/trace_selftest.c | 2
-rw-r--r--  lib/mpi/mpicoder.c | 2
-rw-r--r--  mm/frontswap.c | 2
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/memcontrol.c | 14
-rw-r--r--  mm/migrate.c | 23
-rw-r--r--  mm/page_alloc.c | 6
-rw-r--r--  mm/swap_state.c | 18
-rw-r--r--  mm/swapfile.c | 2
-rw-r--r--  net/9p/client.c | 55
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 87
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 4
-rw-r--r--  net/batman-adv/sysfs.c | 5
-rw-r--r--  net/bluetooth/hci_core.c | 21
-rw-r--r--  net/bluetooth/l2cap_core.c | 73
-rw-r--r--  net/bluetooth/mgmt.c | 23
-rw-r--r--  net/bluetooth/smp.c | 4
-rw-r--r--  net/bridge/br_multicast.c | 5
-rw-r--r--  net/ceph/osd_client.c | 2
-rw-r--r--  net/core/ethtool.c | 6
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/core/sock_diag.c | 9
-rw-r--r--  net/ipv4/ip_tunnel.c | 4
-rw-r--r--  net/ipv4/ip_vti.c | 3
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/l2tp/l2tp_ppp.c | 6
-rw-r--r--  net/mac80211/cfg.c | 6
-rw-r--r--  net/mac80211/ieee80211_i.h | 5
-rw-r--r--  net/mac80211/mlme.c | 87
-rw-r--r--  net/mac80211/rate.c | 2
-rw-r--r--  net/mac80211/util.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 1
-rw-r--r--  net/netfilter/nfnetlink_acct.c | 7
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 7
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 6
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 23
-rw-r--r--  net/netfilter/xt_TCPOPTSTRIP.c | 6
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/packet/af_packet.c | 5
-rw-r--r--  net/sched/sch_api.c | 11
-rw-r--r--  net/sctp/outqueue.c | 6
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/wireless/nl80211.c | 17
-rw-r--r--  scripts/Makefile.lib | 8
-rw-r--r--  scripts/dtc/dtc-lexer.l | 2
-rw-r--r--  scripts/dtc/dtc-lexer.lex.c_shipped | 232
-rw-r--r--  scripts/dtc/dtc-parser.tab.c_shipped | 715
-rw-r--r--  scripts/dtc/dtc-parser.tab.h_shipped | 14
-rw-r--r--  sound/core/pcm_native.c | 4
-rw-r--r--  sound/pci/hda/hda_generic.c | 68
-rw-r--r--  sound/pci/hda/hda_generic.h | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 3
-rw-r--r--  sound/pci/hda/patch_via.c | 10
-rw-r--r--  sound/pci/sis7019.c | 3
-rw-r--r--  sound/soc/codecs/cs42l52.c | 6
-rw-r--r--  sound/soc/codecs/tlv320aic3x.c | 10
-rw-r--r--  sound/soc/codecs/wm5102.c | 3
-rw-r--r--  sound/soc/codecs/wm5110.c | 3
-rw-r--r--  sound/soc/codecs/wm8994.c | 3
-rw-r--r--  sound/soc/soc-dapm.c | 49
-rw-r--r--  sound/soc/soc-pcm.c | 13
-rw-r--r--  sound/usb/mixer.c | 1
-rw-r--r--  sound/usb/quirks-table.h | 14
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 2
369 files changed, 8019 insertions(+), 2198 deletions(-)
diff --git a/Documentation/bcache.txt b/Documentation/bcache.txt
index 77db8809bd9..b3a7e7d384f 100644
--- a/Documentation/bcache.txt
+++ b/Documentation/bcache.txt
@@ -319,7 +319,10 @@ cache<0..n>
Symlink to each of the cache devices comprising this cache set.
cache_available_percent
- Percentage of cache device free.
+ Percentage of cache device which doesn't contain dirty data, and could
+ potentially be used for writeback. This doesn't mean this space isn't used
+ for clean cached data; the unused statistic (in priority_stats) is typically
+ much lower.
clear_stats
Clears the statistics associated with this cache
@@ -423,8 +426,11 @@ nbuckets
Total buckets in this cache
priority_stats
- Statistics about how recently data in the cache has been accessed. This can
- reveal your working set size.
+ Statistics about how recently data in the cache has been accessed.
+ This can reveal your working set size. Unused is the percentage of
+ the cache that doesn't contain any data. Metadata is bcache's
+ metadata overhead. Average is the average priority of cache buckets.
+ Next is a list of quantiles with the priority threshold of each.
written
Sum of all data that has been written to the cache; comparison with
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 08f01e79c41..b9015912bca 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -498,12 +498,8 @@ Your cooperation is appreciated.
Each device type has 5 bits (32 minors).
- 13 block 8-bit MFM/RLL/IDE controller
- 0 = /dev/xda First XT disk whole disk
- 64 = /dev/xdb Second XT disk whole disk
-
- Partitions are handled in the same way as IDE disks
- (see major number 3).
+ 13 block Previously used for the XT disk (/dev/xdN)
+ Deleted in kernel v3.9.
14 char Open Sound System (OSS)
0 = /dev/mixer Mixer control
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
index 2a3feabd3b2..34c1505774b 100644
--- a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
@@ -1,7 +1,7 @@
Atmel AT91RM9200 Real Time Clock
Required properties:
-- compatible: should be: "atmel,at91rm9200-rtc"
+- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: rtc alarm/event interrupt
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt
index 279ac0a8c5b..132a094c7bc 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmatest.txt
@@ -34,7 +34,7 @@ command:
After a while you will start to get messages about current status or error like
in the original code.
-Note that running a new test will stop any in progress test.
+Note that running a new test will not stop any in progress test.
The following command should return actual state of the test.
% cat /sys/kernel/debug/dmatest/run
@@ -52,8 +52,8 @@ To wait for test done the user may perform a busy loop that checks the state.
The module parameters that is supplied to the kernel command line will be used
for the first performed test. After user gets a control, the test could be
-interrupted or re-run with same or different parameters. For the details see
-the above section "Part 2 - When dmatest is built as a module..."
+re-run with the same or different parameters. For the details see the above
+section "Part 2 - When dmatest is built as a module..."
In both cases the module parameters are used as initial values for the test case.
You always could check them at run-time by running
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6e3b18a8afc..2fe6e767b3d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3351,9 +3351,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
plus one apbt timer for broadcast timer.
x86_mrst_timer=apbt_only | lapic_and_apbt
- xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks.
- xd_geo= See header of drivers/block/xd.c.
-
xen_emul_unplug= [HW,X86,XEN]
Unplug Xen emulated devices
Format: [unplug0,][unplug1]
diff --git a/Documentation/m68k/kernel-options.txt b/Documentation/m68k/kernel-options.txt
index 97d45f276fe..eaf32a1fd0b 100644
--- a/Documentation/m68k/kernel-options.txt
+++ b/Documentation/m68k/kernel-options.txt
@@ -80,8 +80,6 @@ Valid names are:
/dev/sdd: -> 0x0830 (forth SCSI disk)
/dev/sde: -> 0x0840 (fifth SCSI disk)
/dev/fd : -> 0x0200 (floppy disk)
- /dev/xda: -> 0x0c00 (first XT disk, unused in Linux/m68k)
- /dev/xdb: -> 0x0c40 (second XT disk, unused in Linux/m68k)
The name must be followed by a decimal number, that stands for the
partition number. Internally, the value of the number is just
diff --git a/MAINTAINERS b/MAINTAINERS
index 0518ec49467..60d6a339350 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2895,8 +2895,8 @@ F: drivers/media/dvb-frontends/ec100*
ECRYPT FILE SYSTEM
M: Tyler Hicks <tyhicks@canonical.com>
-M: Dustin Kirkland <dustin.kirkland@gazzang.com>
L: ecryptfs@vger.kernel.org
+W: http://ecryptfs.org
W: https://launchpad.net/ecryptfs
S: Supported
F: Documentation/filesystems/ecryptfs.txt
@@ -4453,6 +4453,16 @@ S: Maintained
F: drivers/scsi/*iscsi*
F: include/scsi/*iscsi*
+ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
+M: Or Gerlitz <ogerlitz@mellanox.com>
+M: Roi Dayan <roid@mellanox.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+W: http://www.openfabrics.org
+W: www.open-iscsi.org
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+F: drivers/infiniband/ulp/iser
+
ISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -5761,7 +5771,7 @@ M: Matthew Wilcox <willy@linux.intel.com>
L: linux-nvme@lists.infradead.org
T: git git://git.infradead.org/users/willy/linux-nvme.git
S: Supported
-F: drivers/block/nvme.c
+F: drivers/block/nvme*
F: include/linux/nvme.h
OMAP SUPPORT
@@ -7619,7 +7629,7 @@ F: drivers/clk/spear/
SPI SUBSYSTEM
M: Mark Brown <broonie@kernel.org>
M: Grant Likely <grant.likely@linaro.org>
-L: spi-devel-general@lists.sourceforge.net
+L: linux-spi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
S: Maintained
@@ -8999,7 +9009,7 @@ S: Maintained
F: drivers/net/wireless/wl3501*
WM97XX TOUCHSCREEN DRIVERS
-M: Mark Brown <broonie@opensource.wolfsonmicro.com>
+M: Mark Brown <broonie@kernel.org>
M: Liam Girdwood <lrg@slimlogic.co.uk>
L: linux-input@vger.kernel.org
T: git git://opensource.wolfsonmicro.com/linux-2.6-touch
@@ -9009,7 +9019,6 @@ F: drivers/input/touchscreen/*wm97*
F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS
-M: Mark Brown <broonie@opensource.wolfsonmicro.com>
L: patches@opensource.wolfsonmicro.com
T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
diff --git a/Makefile b/Makefile
index 49aa84b9d2f..c6863b55f7c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Unicycling Gorilla
# *DOCUMENTATION*
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 3580d57ea21..79e9bdbfc49 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -124,7 +124,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all -DZIMAGE
+asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
index 6e8382d5b7a..5392ee63338 100644
--- a/arch/arm/boot/compressed/debug.S
+++ b/arch/arm/boot/compressed/debug.S
@@ -1,6 +1,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
#include CONFIG_DEBUG_LL_INCLUDE
ENTRY(putc)
@@ -10,3 +12,29 @@ ENTRY(putc)
busyuart r3, r1
mov pc, lr
ENDPROC(putc)
+
+#else
+
+ENTRY(putc)
+ adr r1, 1f
+ ldmia r1, {r2, r3}
+ add r2, r2, r1
+ ldr r1, [r2, r3]
+ strb r0, [r1]
+ mov r0, #0x03 @ SYS_WRITEC
+ ARM( svc #0x123456 )
+ THUMB( svc #0xab )
+ mov pc, lr
+ .align 2
+1: .word _GLOBAL_OFFSET_TABLE_ - .
+ .word semi_writec_buf(GOT)
+ENDPROC(putc)
+
+ .bss
+ .global semi_writec_buf
+ .type semi_writec_buf, %object
+semi_writec_buf:
+ .space 4
+ .size semi_writec_buf, 4
+
+#endif
diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
index 6179d94dd5c..3115e313d9f 100644
--- a/arch/arm/boot/compressed/head-sa1100.S
+++ b/arch/arm/boot/compressed/head-sa1100.S
@@ -11,6 +11,7 @@
#include <asm/mach-types.h>
.section ".start", "ax"
+ .arch armv4
__SA1100_start:
diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S
index 089c560e07f..92b56897ed6 100644
--- a/arch/arm/boot/compressed/head-shark.S
+++ b/arch/arm/boot/compressed/head-shark.S
@@ -18,6 +18,7 @@
.section ".start", "ax"
+ .arch armv4
b __beginning
__ofw_data: .long 0 @ the number of memory blocks
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index fe4d9c3ad76..032a8d98714 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -11,6 +11,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+ .arch armv7-a
/*
* Debugging stuff
*
@@ -805,8 +806,8 @@ call_cache_fn: adr r12, proc_types
.align 2
.type proc_types,#object
proc_types:
- .word 0x00000000 @ old ARM ID
- .word 0x0000f000
+ .word 0x41000000 @ old ARM ID
+ .word 0xff00f000
mov pc, lr
THUMB( nop )
mov pc, lr
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 1460d9b88ad..8e1248f01fa 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -409,8 +409,8 @@
ti,hwmods = "gpmc";
reg = <0x50000000 0x2000>;
interrupts = <100>;
- num-cs = <7>;
- num-waitpins = <2>;
+ gpmc,num-cs = <7>;
+ gpmc,num-waitpins = <2>;
#address-cells = <2>;
#size-cells = <1>;
status = "disabled";
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 3ee63d128e2..76db557adbe 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -39,8 +39,9 @@
};
soc {
- ranges = <0 0 0xd0000000 0x100000
- 0xf0000000 0 0xf0000000 0x1000000>;
+ ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
+ 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
+ 0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB */>;
internal-regs {
serial@12000 {
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index 46b785064dd..fdea75c7341 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -27,8 +27,9 @@
};
soc {
- ranges = <0 0 0xd0000000 0x100000
- 0xf0000000 0 0xf0000000 0x8000000>;
+ ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
+ 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
+ 0xf0000000 0 0xf0000000 0x8000000 /* Device Bus, NOR 128MiB */>;
internal-regs {
serial@12000 {
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index f0052dccf9a..1e12aeff403 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -44,6 +44,7 @@
reg = <0x7e201000 0x1000>;
interrupts = <2 25>;
clock-frequency = <3000000>;
+ arm,primecell-periphid = <0x00241011>;
};
gpio: gpio {
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index d2550e0bca2..701153992c6 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -141,8 +141,8 @@
#size-cells = <0>;
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x43fa4000 0x4000>;
- clocks = <&clks 62>;
- clock-names = "ipg";
+ clocks = <&clks 62>, <&clks 62>;
+ clock-names = "ipg", "per";
interrupts = <14>;
status = "disabled";
};
@@ -182,8 +182,8 @@
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x50004000 0x4000>;
interrupts = <0>;
- clocks = <&clks 80>;
- clock-names = "ipg";
+ clocks = <&clks 80>, <&clks 80>;
+ clock-names = "ipg", "per";
status = "disabled";
};
@@ -210,8 +210,8 @@
#size-cells = <0>;
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x50010000 0x4000>;
- clocks = <&clks 79>;
- clock-names = "ipg";
+ clocks = <&clks 79>, <&clks 79>;
+ clock-names = "ipg", "per";
interrupts = <13>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index ff4bd4873ed..75bd1138651 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -131,7 +131,7 @@
compatible = "fsl,imx27-cspi";
reg = <0x1000e000 0x1000>;
interrupts = <16>;
- clocks = <&clks 53>, <&clks 0>;
+ clocks = <&clks 53>, <&clks 53>;
clock-names = "ipg", "per";
status = "disabled";
};
@@ -142,7 +142,7 @@
compatible = "fsl,imx27-cspi";
reg = <0x1000f000 0x1000>;
interrupts = <15>;
- clocks = <&clks 52>, <&clks 0>;
+ clocks = <&clks 52>, <&clks 52>;
clock-names = "ipg", "per";
status = "disabled";
};
@@ -223,7 +223,7 @@
compatible = "fsl,imx27-cspi";
reg = <0x10017000 0x1000>;
interrupts = <6>;
- clocks = <&clks 51>, <&clks 0>;
+ clocks = <&clks 51>, <&clks 51>;
clock-names = "ipg", "per";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 21bb786c5b3..53fdde69bbf 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -631,7 +631,7 @@
compatible = "fsl,imx51-cspi", "fsl,imx35-cspi";
reg = <0x83fc0000 0x4000>;
interrupts = <38>;
- clocks = <&clks 55>, <&clks 0>;
+ clocks = <&clks 55>, <&clks 55>;
clock-names = "ipg", "per";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 845982eaac2..eb83aa039b8 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -714,7 +714,7 @@
compatible = "fsl,imx53-cspi", "fsl,imx35-cspi";
reg = <0x63fc0000 0x4000>;
interrupts = <38>;
- clocks = <&clks 55>, <&clks 0>;
+ clocks = <&clks 55>, <&clks 55>;
clock-names = "ipg", "per";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 03bd60deb52..eeb734e2570 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -56,9 +56,23 @@
};
};
+&omap4_pmx_wkup {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &twl6030_wkup_pins
+ >;
+
+ twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+ pinctrl-single,pins = <
+ 0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+ >;
+ };
+};
+
&omap4_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
+ &twl6030_pins
&twl6040_pins
&mcpdm_pins
&mcbsp1_pins
@@ -66,6 +80,12 @@
&tpd12s015_pins
>;
+ twl6030_pins: pinmux_twl6030_pins {
+ pinctrl-single,pins = <
+ 0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+ >;
+ };
+
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index a35d9cd5806..98505a2ef16 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -142,9 +142,23 @@
};
};
+&omap4_pmx_wkup {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &twl6030_wkup_pins
+ >;
+
+ twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+ pinctrl-single,pins = <
+ 0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+ >;
+ };
+};
+
&omap4_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
+ &twl6030_pins
&twl6040_pins
&mcpdm_pins
&dmic_pins
@@ -179,6 +193,12 @@
>;
};
+ twl6030_pins: pinmux_twl6030_pins {
+ pinctrl-single,pins = <
+ 0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+ >;
+ };
+
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 3dd7ff82582..635cae28301 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -538,6 +538,7 @@
interrupts = <0 41 0x4>;
ti,hwmods = "timer5";
ti,timer-dsp;
+ ti,timer-pwm;
};
timer6: timer@4013a000 {
@@ -574,6 +575,7 @@
reg = <0x4803e000 0x80>;
interrupts = <0 45 0x4>;
ti,hwmods = "timer9";
+ ti,timer-pwm;
};
timer10: timer@48086000 {
@@ -581,6 +583,7 @@
reg = <0x48086000 0x80>;
interrupts = <0 46 0x4>;
ti,hwmods = "timer10";
+ ti,timer-pwm;
};
timer11: timer@48088000 {
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 968c0a14e0a..209e6504922 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- /* Read TPIDRPRW */
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+ register unsigned long *sp asm ("sp");
+
+ /*
+ * Read TPIDRPRW.
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
return off;
}
#define __my_cpu_offset __my_cpu_offset()
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index f10316b4ecd..c5a59546a25 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
@@ -200,6 +201,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
* cpu topology table
*/
struct cputopo_arm cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 027c9e7f0d1..f7e504b7874 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -386,6 +386,8 @@ int __init exynos_fdt_map_chipid(unsigned long node, const char *uname,
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
+ debug_ll_io_init();
+
#ifdef CONFIG_OF
if (initial_boot_params)
of_scan_flat_dt(exynos_fdt_map_chipid, NULL);
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index dda9a2bd3ac..4e3148ce852 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -181,14 +181,14 @@ static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy",
static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
-static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
static const char *gpu_axi_sels[] = { "axi", "ahb", };
static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
+static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
diff --git a/arch/arm/mach-kirkwood/board-ts219.c b/arch/arm/mach-kirkwood/board-ts219.c
index acb0187c7ee..4695d5f35fc 100644
--- a/arch/arm/mach-kirkwood/board-ts219.c
+++ b/arch/arm/mach-kirkwood/board-ts219.c
@@ -41,13 +41,3 @@ void __init qnap_dt_ts219_init(void)
pm_power_off = qnap_tsx1x_power_off;
}
-
-/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */
-static int __init ts219_pci_init(void)
-{
- if (machine_is_ts219())
- kirkwood_pcie_init(KW_PCIE0);
-
- return 0;
-}
-subsys_initcall(ts219_pci_init);
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 827cde42414..e96fd71abd7 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -22,9 +22,10 @@ static unsigned int __init kirkwood_variant(void)
kirkwood_pcie_id(&dev, &rev);
- if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) ||
- (dev == MV88F6282_DEV_ID))
+ if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
return MPP_F6281_MASK;
+ if (dev == MV88F6282_DEV_ID)
+ return MPP_F6282_MASK;
if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
return MPP_F6192_MASK;
if (dev == MV88F6180_DEV_ID)
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 53e8391192c..5476669ba90 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -32,15 +32,21 @@ ENTRY(ll_set_cpu_coherent)
/* Add CPU to SMP group - Atomic */
add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
- ldr r2, [r3]
+1:
+ ldrex r2, [r3]
orr r2, r2, r1
- str r2, [r3]
+ strex r0, r2, [r3]
+ cmp r0, #0
+ bne 1b
/* Enable coherency on CPU - Atomic */
- add r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET
- ldr r2, [r3]
+ add r3, r3, #ARMADA_XP_CFB_CFG_REG_OFFSET
+1:
+ ldrex r2, [r3]
orr r2, r2, r1
- str r2, [r3]
+ strex r0, r2, [r3]
+ cmp r0, #0
+ bne 1b
dsb
diff --git a/arch/arm/mach-omap2/clock36xx.c b/arch/arm/mach-omap2/clock36xx.c
index 8f3bf4e5090..bbd6a3f717e 100644
--- a/arch/arm/mach-omap2/clock36xx.c
+++ b/arch/arm/mach-omap2/clock36xx.c
@@ -20,11 +20,12 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
#include "clock.h"
#include "clock36xx.h"
-
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
/**
* omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
@@ -39,29 +40,28 @@
*/
int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
{
- struct clk_hw_omap *parent;
+ struct clk_divider *parent;
struct clk_hw *parent_hw;
- u32 dummy_v, orig_v, clksel_shift;
+ u32 dummy_v, orig_v;
int ret;
/* Clear PWRDN bit of HSDIVIDER */
ret = omap2_dflt_clk_enable(clk);
parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
- parent = to_clk_hw_omap(parent_hw);
+ parent = to_clk_divider(parent_hw);
/* Restore the dividers */
if (!ret) {
- clksel_shift = __ffs(parent->clksel_mask);
- orig_v = __raw_readl(parent->clksel_reg);
+ orig_v = __raw_readl(parent->reg);
dummy_v = orig_v;
/* Write any other value different from the Read value */
- dummy_v ^= (1 << clksel_shift);
- __raw_writel(dummy_v, parent->clksel_reg);
+ dummy_v ^= (1 << parent->shift);
+ __raw_writel(dummy_v, parent->reg);
/* Write the original divider */
- __raw_writel(orig_v, parent->clksel_reg);
+ __raw_writel(orig_v, parent->reg);
}
return ret;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 075f7cc5102..69337af748c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -2007,6 +2007,13 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
},
};
+/* uart2 */
+static struct omap_hwmod_dma_info uart2_edma_reqs[] = {
+ { .name = "tx", .dma_req = 28, },
+ { .name = "rx", .dma_req = 29, },
+ { .dma_req = -1 }
+};
+
static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
{ .irq = 73 + OMAP_INTC_START, },
{ .irq = -1 },
@@ -2018,7 +2025,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
.clkdm_name = "l4ls_clkdm",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart2_irqs,
- .sdma_reqs = uart1_edma_reqs,
+ .sdma_reqs = uart2_edma_reqs,
.main_clk = "dpll_per_m2_div4_ck",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c01859398b5..5a2d8034c8d 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -546,8 +546,10 @@ static void __init prcm_setup_regs(void)
/* Clear any pending PRCM interrupts */
omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
- if (omap3_has_iva())
- omap3_iva_idle();
+ /*
+ * We need to idle iva2_pwrdm even on am3703 with no iva2.
+ */
+ omap3_iva_idle();
omap3_d2d_idle();
}
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index 9936c180bf0..8f595c0cc8d 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -101,8 +101,10 @@ static int __init sirfsoc_of_pwrc_init(void)
struct device_node *np;
np = of_find_matching_node(NULL, pwrc_ids);
- if (!np)
- panic("unable to find compatible pwrc node in dtb\n");
+ if (!np) {
+ pr_err("unable to find compatible sirf pwrc node in dtb\n");
+ return -ENOENT;
+ }
/*
* pwrc behind rtciobrg is not located in memory space
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
index 435019ca0a4..d5e0cbc934c 100644
--- a/arch/arm/mach-prima2/rstc.c
+++ b/arch/arm/mach-prima2/rstc.c
@@ -28,8 +28,10 @@ static int __init sirfsoc_of_rstc_init(void)
struct device_node *np;
np = of_find_matching_node(NULL, rstc_ids);
- if (!np)
- panic("unable to find compatible rstc node in dtb\n");
+ if (!np) {
+ pr_err("unable to find compatible sirf rstc node in dtb\n");
+ return -ENOENT;
+ }
sirfsoc_rstc_base = of_iomap(np, 0);
if (!sirfsoc_rstc_base)
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index fdf3894b1cc..9696f364686 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -252,7 +252,7 @@ static struct sh_timer_config cmt10_platform_data = {
.name = "CMT10",
.channel_offset = 0x10,
.timer_bit = 0,
- .clockevent_rating = 125,
+ .clockevent_rating = 80,
.clocksource_rating = 125,
};
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 33c353bc1c4..d6b7c8556fa 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -374,6 +374,7 @@ static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
/* supplies to the display/camera */
[AB8500_LDO_AUX1] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-DISPLAY",
.min_uV = 2800000,
@@ -387,6 +388,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
},
/* supplies to the on-board eMMC */
[AB8500_LDO_AUX2] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-eMMC1",
.min_uV = 1100000,
@@ -402,6 +404,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
},
/* supply for VAUX3, supplies to SDcard slots */
[AB8500_LDO_AUX3] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-MMC-SD",
.min_uV = 1100000,
diff --git a/arch/arm/mach-ux500/cpuidle.c b/arch/arm/mach-ux500/cpuidle.c
index 317a2be129f..a45dd09daed 100644
--- a/arch/arm/mach-ux500/cpuidle.c
+++ b/arch/arm/mach-ux500/cpuidle.c
@@ -21,6 +21,7 @@
#include <asm/proc-fns.h>
#include "db8500-regs.h"
+#include "id.h"
static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
@@ -114,6 +115,9 @@ static struct cpuidle_driver ux500_idle_driver = {
int __init ux500_idle_init(void)
{
+ if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
+ return -ENODEV;
+
/* Configure wake up reasons */
prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
PRCMU_WAKEUP(ABB));
diff --git a/arch/arm/plat-samsung/include/plat/uncompress.h b/arch/arm/plat-samsung/include/plat/uncompress.h
index 438b24846e7..02b66d723d1 100644
--- a/arch/arm/plat-samsung/include/plat/uncompress.h
+++ b/arch/arm/plat-samsung/include/plat/uncompress.h
@@ -66,6 +66,9 @@ uart_rd(unsigned int reg)
static void putc(int ch)
{
+ if (!config_enabled(CONFIG_DEBUG_LL))
+ return;
+
if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) {
int level;
@@ -118,7 +121,12 @@ static void arch_decomp_error(const char *x)
#ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO
static inline void arch_enable_uart_fifo(void)
{
- u32 fifocon = uart_rd(S3C2410_UFCON);
+ u32 fifocon;
+
+ if (!config_enabled(CONFIG_DEBUG_LL))
+ return;
+
+ fifocon = uart_rd(S3C2410_UFCON);
if (!(fifocon & S3C2410_UFCON_FIFOMODE)) {
fifocon |= S3C2410_UFCON_RESETBOTH;
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 53210ec4e8e..bd7124c87fe 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -16,6 +16,7 @@
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/serial_core.h>
#include <linux/io.h>
@@ -261,7 +262,8 @@ static int s3c_pm_enter(suspend_state_t state)
* require a full power-cycle)
*/
- if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
+ if (!of_have_populated_dt() &&
+ !any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
!any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
printk(KERN_ERR "%s: Aborting sleep\n", __func__);
@@ -270,8 +272,11 @@ static int s3c_pm_enter(suspend_state_t state)
/* save all necessary core registers not covered by the drivers */
- samsung_pm_save_gpios();
- samsung_pm_saved_gpios();
+ if (!of_have_populated_dt()) {
+ samsung_pm_save_gpios();
+ samsung_pm_saved_gpios();
+ }
+
s3c_pm_save_uarts();
s3c_pm_save_core();
@@ -310,8 +315,11 @@ static int s3c_pm_enter(suspend_state_t state)
s3c_pm_restore_core();
s3c_pm_restore_uarts();
- samsung_pm_restore_gpios();
- s3c_pm_restored_gpios();
+
+ if (!of_have_populated_dt()) {
+ samsung_pm_restore_gpios();
+ s3c_pm_restored_gpios();
+ }
s3c_pm_debug_init();
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 8cc83431805..2f6eec1e34b 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -86,6 +86,7 @@ static inline int gpio_cansleep(unsigned gpio)
return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
}
+#ifndef CONFIG_GPIOLIB
static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
int err;
@@ -105,5 +106,5 @@ static inline int gpio_request_one(unsigned gpio, unsigned long flags, const cha
return err;
}
-
+#endif /* !CONFIG_GPIOLIB */
#endif
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index b0baa299f89..01b1b3f94fe 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -428,13 +428,16 @@ static void octeon_restart(char *command)
*/
static void octeon_kill_core(void *arg)
{
- mb();
- if (octeon_is_simulation()) {
- /* The simulator needs the watchdog to stop for dead cores */
- cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+ if (octeon_is_simulation())
/* A break instruction causes the simulator stop a core */
- asm volatile ("sync\nbreak");
- }
+ asm volatile ("break" ::: "memory");
+
+ local_irq_disable();
+ /* Disable watchdog on this core. */
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+ /* Spin in a low power mode. */
+ while (true)
+ asm volatile ("wait" ::: "memory");
}
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 820116067c1..516e6e9a559 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -117,7 +117,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
if (! ((asid += ASID_INC) & ASID_MASK) ) {
if (cpu_has_vtag_icache)
flush_icache_all();
-#ifdef CONFIG_VIRTUALIZATION
+#ifdef CONFIG_KVM
kvm_local_flush_tlb_all(); /* start new asid cycle */
#else
local_flush_tlb_all(); /* start new asid cycle */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index a3186f2bb8a..5e6cd094739 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -16,6 +16,38 @@
#include <asm/isadep.h>
#include <uapi/asm/ptrace.h>
+/*
+ * This struct defines the way the registers are stored on the stack during a
+ * system call/exception. As usual the registers k0/k1 aren't being saved.
+ */
+struct pt_regs {
+#ifdef CONFIG_32BIT
+ /* Pad bytes for argument save space on the stack. */
+ unsigned long pad0[6];
+#endif
+
+ /* Saved main processor registers. */
+ unsigned long regs[32];
+
+ /* Saved special registers. */
+ unsigned long cp0_status;
+ unsigned long hi;
+ unsigned long lo;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ unsigned long acx;
+#endif
+ unsigned long cp0_badvaddr;
+ unsigned long cp0_cause;
+ unsigned long cp0_epc;
+#ifdef CONFIG_MIPS_MT_SMTC
+ unsigned long cp0_tcstatus;
+#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ unsigned long long mpl[3]; /* MTM{0,1,2} */
+ unsigned long long mtp[3]; /* MTP{0,1,2} */
+#endif
+} __aligned(8);
+
struct task_struct;
extern int ptrace_getregs(struct task_struct *child, __s64 __user *data);
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index 3f424f5217d..f09ff5ae205 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -58,56 +58,53 @@ struct kvm_fpu {
* bits[2..0] - Register 'sel' index.
* bits[7..3] - Register 'rd' index.
* bits[15..8] - Must be zero.
- * bits[63..16] - 1 -> CP0 registers.
+ * bits[31..16] - 1 -> CP0 registers.
+ * bits[51..32] - Must be zero.
+ * bits[63..52] - As per linux/kvm.h
*
* Other sets registers may be added in the future. Each set would
- * have its own identifier in bits[63..16].
- *
- * The addr field of struct kvm_one_reg must point to an aligned
- * 64-bit wide location. For registers that are narrower than
- * 64-bits, the value is stored in the low order bits of the location,
- * and sign extended to 64-bits.
+ * have its own identifier in bits[31..16].
*
* The registers defined in struct kvm_regs are also accessible, the
* id values for these are below.
*/
-#define KVM_REG_MIPS_R0 0
-#define KVM_REG_MIPS_R1 1
-#define KVM_REG_MIPS_R2 2
-#define KVM_REG_MIPS_R3 3
-#define KVM_REG_MIPS_R4 4
-#define KVM_REG_MIPS_R5 5
-#define KVM_REG_MIPS_R6 6
-#define KVM_REG_MIPS_R7 7
-#define KVM_REG_MIPS_R8 8
-#define KVM_REG_MIPS_R9 9
-#define KVM_REG_MIPS_R10 10
-#define KVM_REG_MIPS_R11 11
-#define KVM_REG_MIPS_R12 12
-#define KVM_REG_MIPS_R13 13
-#define KVM_REG_MIPS_R14 14
-#define KVM_REG_MIPS_R15 15
-#define KVM_REG_MIPS_R16 16
-#define KVM_REG_MIPS_R17 17
-#define KVM_REG_MIPS_R18 18
-#define KVM_REG_MIPS_R19 19
-#define KVM_REG_MIPS_R20 20
-#define KVM_REG_MIPS_R21 21
-#define KVM_REG_MIPS_R22 22
-#define KVM_REG_MIPS_R23 23
-#define KVM_REG_MIPS_R24 24
-#define KVM_REG_MIPS_R25 25
-#define KVM_REG_MIPS_R26 26
-#define KVM_REG_MIPS_R27 27
-#define KVM_REG_MIPS_R28 28
-#define KVM_REG_MIPS_R29 29
-#define KVM_REG_MIPS_R30 30
-#define KVM_REG_MIPS_R31 31
+#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
-#define KVM_REG_MIPS_HI 32
-#define KVM_REG_MIPS_LO 33
-#define KVM_REG_MIPS_PC 34
+#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
/*
* KVM MIPS specific structures and definitions
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
index 4d58d846870..b26f7e31727 100644
--- a/arch/mips/include/uapi/asm/ptrace.h
+++ b/arch/mips/include/uapi/asm/ptrace.h
@@ -22,16 +22,12 @@
#define DSP_CONTROL 77
#define ACX 78
+#ifndef __KERNEL__
/*
* This struct defines the way the registers are stored on the stack during a
* system call/exception. As usual the registers k0/k1 aren't being saved.
*/
struct pt_regs {
-#ifdef CONFIG_32BIT
- /* Pad bytes for argument save space on the stack. */
- unsigned long pad0[6];
-#endif
-
/* Saved main processor registers. */
unsigned long regs[32];
@@ -39,20 +35,11 @@ struct pt_regs {
unsigned long cp0_status;
unsigned long hi;
unsigned long lo;
-#ifdef CONFIG_CPU_HAS_SMARTMIPS
- unsigned long acx;
-#endif
unsigned long cp0_badvaddr;
unsigned long cp0_cause;
unsigned long cp0_epc;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long cp0_tcstatus;
-#endif /* CONFIG_MIPS_MT_SMTC */
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
- unsigned long long mpl[3]; /* MTM{0,1,2} */
- unsigned long long mtp[3]; /* MTP{0,1,2} */
-#endif
} __attribute__ ((aligned (8)));
+#endif /* __KERNEL__ */
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index e06f777e9c4..1188e00bb12 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 97c5a1668e5..202e581e609 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index cf5509f13dd..dba90ec0dc3 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -25,12 +25,16 @@
#define MCOUNT_OFFSET_INSNS 4
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE
+
/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
ftrace_modify_all_code(command);
}
+#endif
+
/*
* Check if the address is in kernel space
*
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 3b09b888afa..0c655deeea4 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -93,26 +93,27 @@ static void rm7k_wait_irqoff(void)
}
/*
- * The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter.
- * alchemy/common/time.c may override cpu_wait!
+ * Au1 'wait' is only useful when the 32kHz counter is used as timer,
+ * since coreclock (and the cp0 counter) stops upon executing it. Only an
+ * interrupt can wake it, so they must be enabled before entering idle modes.
*/
static void au1k_wait(void)
{
+ unsigned long c0status = read_c0_status() | 1; /* irqs on */
+
__asm__(
" .set mips3 \n"
" cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n"
" sync \n"
- " nop \n"
+ " mtc0 %1, $12 \n" /* wr c0status */
" wait \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" .set mips0 \n"
- : : "r" (au1k_wait));
- local_irq_enable();
+ : : "r" (au1k_wait), "r" (c0status));
}
static int __initdata nowait;
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 93c070b41b0..6fa198db899 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -40,6 +40,7 @@
#include <asm/processor.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>
+#include <asm/setup.h>
static struct rtlx_info *rtlx;
static int major;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e3be67012d7..a75ae40184a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -897,22 +897,24 @@ out_sigsegv:
asmlinkage void do_tr(struct pt_regs *regs)
{
- unsigned int opcode, tcode = 0;
+ u32 opcode, tcode = 0;
u16 instr[2];
- unsigned long epc = exception_epc(regs);
+ unsigned long epc = msk_isa16_mode(exception_epc(regs));
- if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
- (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+ if (get_isa16_mode(regs->cp0_epc)) {
+ if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
+ __get_user(instr[1], (u16 __user *)(epc + 2)))
goto out_sigsegv;
- opcode = (instr[0] << 16) | instr[1];
-
- /* Immediate versions don't provide a code. */
- if (!(opcode & OPCODE)) {
- if (get_isa16_mode(regs->cp0_epc))
- /* microMIPS */
- tcode = (opcode >> 12) & 0x1f;
- else
- tcode = ((opcode >> 6) & ((1 << 10) - 1));
+ opcode = (instr[0] << 16) | instr[1];
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 12) & ((1 << 4) - 1);
+ } else {
+ if (__get_user(opcode, (u32 __user *)epc))
+ goto out_sigsegv;
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 6) & ((1 << 10) - 1);
}
do_trap_or_bp(regs, tcode, "Trap");
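The reworked do_tr() above extracts the trap code from two different instruction encodings. A small user-space sketch of just the bit extraction (hypothetical opcode values, no uaccess or OPCODE masking):

#include <stdio.h>
#include <stdint.h>

/* microMIPS: 4-bit code field starting at bit 12 of the 32-bit halfword pair */
static unsigned micromips_tcode(uint16_t hi, uint16_t lo)
{
	uint32_t opcode = ((uint32_t)hi << 16) | lo;
	return (opcode >> 12) & 0xf;
}

/* classic MIPS: 10-bit code field starting at bit 6 */
static unsigned mips_tcode(uint32_t opcode)
{
	return (opcode >> 6) & 0x3ff;
}

int main(void)
{
	printf("%u %u\n", micromips_tcode(0x1234, 0x5678), mips_tcode(0x0001234d));
	return 0;
}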
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index d934b017f47..dd203e59e6f 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -485,29 +485,35 @@ kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
-#define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0)
-#define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0)
-#define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0)
-#define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0)
-#define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2)
-#define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0)
-#define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1)
-#define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0)
-#define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0)
-#define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0)
-#define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0)
-#define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0)
-#define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0)
-#define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0)
-#define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0)
-#define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1)
-#define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0)
-#define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1)
-#define KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2)
-#define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3)
-#define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7)
-#define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0)
-#define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0)
+#define MIPS_CP0_32(_R, _S) \
+ (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S) \
+ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
+#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_R0,
@@ -567,8 +573,6 @@ static u64 kvm_mips_get_one_regs[] = {
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- u64 __user *uaddr = (u64 __user *)(long)reg->addr;
-
struct mips_coproc *cop0 = vcpu->arch.cop0;
s64 v;
@@ -631,18 +635,39 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
default:
return -EINVAL;
}
- return put_user(v, uaddr);
+ if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+ u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+ return put_user(v, uaddr64);
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+ u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+ u32 v32 = (u32)v;
+ return put_user(v32, uaddr32);
+ } else {
+ return -EINVAL;
+ }
}
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- u64 __user *uaddr = (u64 __user *)(long)reg->addr;
struct mips_coproc *cop0 = vcpu->arch.cop0;
u64 v;
- if (get_user(v, uaddr) != 0)
- return -EFAULT;
+ if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+ u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+ if (get_user(v, uaddr64) != 0)
+ return -EFAULT;
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+ u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+ s32 v32;
+
+ if (get_user(v32, uaddr32) != 0)
+ return -EFAULT;
+ v = (s64)v32;
+ } else {
+ return -EINVAL;
+ }
switch (reg->id) {
case KVM_REG_MIPS_R0:
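For context, the size bits now encoded into the register ids are what user space passes back through KVM_GET_ONE_REG. A hedged sketch of reading CP0_Status as a 32-bit register, assuming the KVM_REG_MIPS and KVM_REG_SIZE_U32 constants from the uapi headers introduced by this series:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_cp0_status(int vcpu_fd, uint32_t *out)
{
	struct kvm_one_reg reg;

	/* same encoding as MIPS_CP0_32(12, 0) in the hunk above */
	reg.id   = KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * 12 + 0);
	reg.addr = (unsigned long)out;   /* the kernel put_user()s a u32 here */

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}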
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index ce9818eef7d..afeef93f81a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -301,10 +301,6 @@ static u32 tlb_handler[128] __cpuinitdata;
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
-#ifdef CONFIG_64BIT
-static int check_for_high_segbits __cpuinitdata;
-#endif
-
static int check_for_high_segbits __cpuinitdata;
static unsigned int kscratch_used_mask __cpuinitdata;
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index fb1569580de..6b5f3406f41 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -88,7 +88,7 @@ void __init plat_mem_setup(void)
__dt_setup_arch(&__dtb_start);
if (soc_info.mem_size)
- add_memory_region(soc_info.mem_base, soc_info.mem_size,
+ add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
BOOT_MEM_RAM);
else
detect_memory_region(soc_info.mem_base,
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 26807e5aff5..6f3887d884d 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -176,6 +176,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#ifndef __ASSEMBLY__
@@ -394,19 +395,20 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
- CPU_FTR_HVMODE)
+ CPU_FTR_HVMODE | CPU_FTR_DABRX)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+ CPU_FTR_DABRX)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -415,7 +417,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
- CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
+ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -430,14 +432,15 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
- CPU_FTR_UNALIGNED_LD_STD)
+ CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_PURR | CPU_FTR_REAL_LE)
+ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
- CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
+ CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_ICSWX | CPU_FTR_DABRX )
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8e5fae8beaf..46793b58a76 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -513,7 +513,7 @@ label##_common: \
*/
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
- FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
+ FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index b9dd382cb34..851bac7afa4 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -54,8 +54,16 @@
#define BOOKE_INTERRUPT_DEBUG 15
/* E500 */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
+/*
+ * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
+ */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
+ BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
#define BOOKE_INTERRUPT_DOORBELL 36
@@ -67,10 +75,6 @@
#define BOOKE_INTERRUPT_HV_SYSCALL 40
#define BOOKE_INTERRUPT_HV_PRIV 41
-/* altivec */
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
-
/* book3s */
#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 1f0937d7d4b..2a45d0f0438 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -452,8 +452,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_cpu_type = 0,
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.platform = "power8",
@@ -506,8 +506,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = 0,
- .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.platform = "power8",
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 246b11c4fe7..8741c854e03 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -465,20 +465,6 @@ BEGIN_FTR_SECTION
std r0, THREAD_EBBHR(r3)
mfspr r0, SPRN_EBBRR
std r0, THREAD_EBBRR(r3)
-
- /* PMU registers made user read/(write) by EBB */
- mfspr r0, SPRN_SIAR
- std r0, THREAD_SIAR(r3)
- mfspr r0, SPRN_SDAR
- std r0, THREAD_SDAR(r3)
- mfspr r0, SPRN_SIER
- std r0, THREAD_SIER(r3)
- mfspr r0, SPRN_MMCR0
- std r0, THREAD_MMCR0(r3)
- mfspr r0, SPRN_MMCR2
- std r0, THREAD_MMCR2(r3)
- mfspr r0, SPRN_MMCRA
- std r0, THREAD_MMCRA(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif
@@ -581,20 +567,6 @@ BEGIN_FTR_SECTION
ld r0, THREAD_EBBRR(r4)
mtspr SPRN_EBBRR, r0
- /* PMU registers made user read/(write) by EBB */
- ld r0, THREAD_SIAR(r4)
- mtspr SPRN_SIAR, r0
- ld r0, THREAD_SDAR(r4)
- mtspr SPRN_SDAR, r0
- ld r0, THREAD_SIER(r4)
- mtspr SPRN_SIER, r0
- ld r0, THREAD_MMCR0(r4)
- mtspr SPRN_MMCR0, r0
- ld r0, THREAD_MMCR2(r4)
- mtspr SPRN_MMCR2, r0
- ld r0, THREAD_MMCRA(r4)
- mtspr SPRN_MMCRA, r0
-
ld r0,THREAD_TAR(r4)
mtspr SPRN_TAR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e6eba1bf61a..40e4a17c8ba 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -454,38 +454,14 @@ BEGIN_FTR_SECTION
xori r10,r10,(MSR_FE0|MSR_FE1)
mtmsrd r10
sync
- fmr 0,0
- fmr 1,1
- fmr 2,2
- fmr 3,3
- fmr 4,4
- fmr 5,5
- fmr 6,6
- fmr 7,7
- fmr 8,8
- fmr 9,9
- fmr 10,10
- fmr 11,11
- fmr 12,12
- fmr 13,13
- fmr 14,14
- fmr 15,15
- fmr 16,16
- fmr 17,17
- fmr 18,18
- fmr 19,19
- fmr 20,20
- fmr 21,21
- fmr 22,22
- fmr 23,23
- fmr 24,24
- fmr 25,25
- fmr 26,26
- fmr 27,27
- fmr 28,28
- fmr 29,29
- fmr 30,30
- fmr 31,31
+
+#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n) FMR2(n) ; FMR2(n+2)
+#define FMR8(n) FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+ FMR32(0)
+
FTR_SECTION_ELSE
/*
* To denormalise we need to move a copy of the register to itself.
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE
oris r10,r10,MSR_VSX@h
mtmsrd r10
sync
- XVCPSGNDP(0,0,0)
- XVCPSGNDP(1,1,1)
- XVCPSGNDP(2,2,2)
- XVCPSGNDP(3,3,3)
- XVCPSGNDP(4,4,4)
- XVCPSGNDP(5,5,5)
- XVCPSGNDP(6,6,6)
- XVCPSGNDP(7,7,7)
- XVCPSGNDP(8,8,8)
- XVCPSGNDP(9,9,9)
- XVCPSGNDP(10,10,10)
- XVCPSGNDP(11,11,11)
- XVCPSGNDP(12,12,12)
- XVCPSGNDP(13,13,13)
- XVCPSGNDP(14,14,14)
- XVCPSGNDP(15,15,15)
- XVCPSGNDP(16,16,16)
- XVCPSGNDP(17,17,17)
- XVCPSGNDP(18,18,18)
- XVCPSGNDP(19,19,19)
- XVCPSGNDP(20,20,20)
- XVCPSGNDP(21,21,21)
- XVCPSGNDP(22,22,22)
- XVCPSGNDP(23,23,23)
- XVCPSGNDP(24,24,24)
- XVCPSGNDP(25,25,25)
- XVCPSGNDP(26,26,26)
- XVCPSGNDP(27,27,27)
- XVCPSGNDP(28,28,28)
- XVCPSGNDP(29,29,29)
- XVCPSGNDP(30,30,30)
- XVCPSGNDP(31,31,31)
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
+#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
+#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
+ XVCPSGNDP32(0)
+
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+ b denorm_done
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER8 we need to do that for all 64 VSX registers
+ */
+ XVCPSGNDP32(32)
+denorm_done:
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
@@ -721,7 +683,7 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
- STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
#ifdef CONFIG_PPC_DOORBELL
STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
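The FMR*/XVCPSGNDP* macros above replace 32 hand-written lines with a log2 "doubling" expansion. The same trick in plain C, as an editorial sketch (the regs[] array is invented just to show the expansion):

#define TOUCH1(n)  regs[n] = regs[n]
#define TOUCH2(n)  TOUCH1(n);  TOUCH1((n) + 1)
#define TOUCH4(n)  TOUCH2(n);  TOUCH2((n) + 2)
#define TOUCH8(n)  TOUCH4(n);  TOUCH4((n) + 4)
#define TOUCH16(n) TOUCH8(n);  TOUCH8((n) + 8)
#define TOUCH32(n) TOUCH16(n); TOUCH16((n) + 16)

static void touch_all(volatile unsigned long regs[32])
{
	TOUCH32(0);   /* expands to regs[0] = regs[0]; ... regs[31] = regs[31]; */
}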
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5cbcf4d5a80..ea185e0b3ca 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
* in case we also had a rollover while hard disabled
*/
local_paca->irq_happened &= ~PACA_IRQ_DEC;
- if (decrementer_check_overflow())
+ if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
return 0x900;
/* Finally check if an external interrupt happened */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 7f2273cc3c7..eabeec99101 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -827,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
}
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
struct resource *res = dev->resource + i;
+ struct pci_bus_region reg;
if (!res->flags)
continue;
@@ -835,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
* at 0 as unset as well, except if PCI_PROBE_ONLY is also set
* since in that case, we don't want to re-assign anything
*/
+ pcibios_resource_to_bus(dev, &reg, res);
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
- (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+ (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
/* Only print message if not re-assigning */
if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a902723fdc6..076d1242507 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
mtspr(SPRN_DABR, dabr);
- mtspr(SPRN_DABRX, dabrx);
+ if (cpu_has_feature(CPU_FTR_DABRX))
+ mtspr(SPRN_DABRX, dabrx);
return 0;
}
#else
@@ -1368,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
-void __ppc64_runlatch_on(void)
+void notrace __ppc64_runlatch_on(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
@@ -1381,7 +1382,7 @@ void __ppc64_runlatch_on(void)
}
/* Called with hard IRQs off */
-void __ppc64_runlatch_off(void)
+void notrace __ppc64_runlatch_off(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f18c79c324e..c0e5caf8ccc 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1165,6 +1165,16 @@ bail:
exception_exit(prev_state);
}
+/*
+ * This occurs when running in hypervisor mode on POWER6 or later
+ * and an illegal instruction is encountered.
+ */
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
+{
+ regs->msr |= REASON_ILLEGAL;
+ program_check_exception(regs);
+}
+
void alignment_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5dd3ab46997..ed038544814 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *tlbe;
unsigned int gtlb_index;
+ int idx;
gtlb_index = kvmppc_get_gpr(vcpu, ra);
if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
return EMULATE_FAIL;
}
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if (tlbe_is_host_safe(vcpu, tlbe)) {
gva_t eaddr;
gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
tlbe->word2);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 1020119226d..5cd7ad0c117 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -832,6 +832,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
int r = RESUME_HOST;
int s;
+ int idx;
+
+#ifdef CONFIG_PPC64
+ WARN_ON(local_paca->irq_happened != 0);
+#endif
+
+ /*
+ * We enter with interrupts disabled in hardware, but
+ * we need to call hard_irq_disable anyway to ensure that
+ * the software state is kept in sync.
+ */
+ hard_irq_disable();
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -1053,6 +1065,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
gfn = gpaddr >> PAGE_SHIFT;
@@ -1075,6 +1089,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
@@ -1098,6 +1113,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
gfn = gpaddr >> PAGE_SHIFT;
@@ -1114,6 +1131,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index c41a5a96b55..6d6f153b6c1 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
struct kvm_book3e_206_tlb_entry *gtlbe;
int tlbsel, esel;
int recal = 0;
+ int idx;
tlbsel = get_tlb_tlbsel(vcpu);
esel = get_tlb_esel(vcpu, tlbsel);
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
kvmppc_set_tlb1map_range(vcpu, gtlbe);
}
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
u64 eaddr = get_tlb_eaddr(gtlbe);
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
return EMULATE_DONE;
}
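The srcu_read_lock()/srcu_read_unlock() pairs added to 44x_tlb.c, booke.c and e500_mmu.c all follow the same read-side pattern around kvm->srcu-protected data. A minimal kernel-style sketch of that pattern (the memslot walk itself is elided):

#include <linux/kvm_host.h>
#include <linux/srcu.h>

static void walk_memslots(struct kvm *kvm)
{
	int idx;

	idx = srcu_read_lock(&kvm->srcu);   /* enter SRCU read-side section */
	/* ... kvm_memslots(kvm) and guest mappings may be used safely here ... */
	srcu_read_unlock(&kvm->srcu, idx);  /* must pass back the idx returned above */
}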
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 753cc99eff2..19c8379575f 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
r = 0;
- else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
- r = 0;
else
r = -ENOTSUPP;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 845c867444e..29c6482890c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1758,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
}
}
}
- if ((!found) && printk_ratelimit())
+ if (!found && !nmi && printk_ratelimit())
printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 19506f93573..b456b157d33 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
ibm_configure_pe = rtas_token("ibm,configure-pe");
ibm_configure_bridge = rtas_token("ibm,configure-bridge");
- /* necessary sanity check */
+ /*
+ * Necessary sanity check. We needn't check "get-config-addr-info"
+ * and its variant since the old firmware probably supports addressing
+ * by domain/bus/slot/function for EEH RTAS operations.
+ */
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
__func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
__func__);
return -EINVAL;
- } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
- ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
- pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
- "<ibm,get-config-addr-info> invalid\n",
- __func__);
- return -EINVAL;
} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,configure-pe> and "
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index ac01463038f..e8b6e5b8932 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -623,7 +623,7 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
" csg %0,%1,%2\n"
" jl 0b\n"
: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
- : "Q" (ptep[PTRS_PER_PTE]) : "cc");
+ : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
return __pgste(new);
}
@@ -635,11 +635,19 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
" nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */
" stg %1,%0\n"
: "=Q" (ptep[PTRS_PER_PTE])
- : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
+ : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
+ : "cc", "memory");
preempt_enable();
#endif
}
+static inline void pgste_set(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+ *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
+#endif
+}
+
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
@@ -704,17 +712,19 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
unsigned long address;
- unsigned long okey, nkey;
+ unsigned long nkey;
if (pte_val(entry) & _PAGE_INVALID)
return;
+ VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
address = pte_val(entry) & PAGE_MASK;
- okey = nkey = page_get_storage_key(address);
- nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
- /* Set page access key and fetch protection bit from pgste */
- nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
- if (okey != nkey)
- page_set_storage_key(address, nkey, 0);
+ /*
+ * Set page access key and fetch protection bit from pgste.
+ * The guest C/R information is still in the PGSTE, set real
+ * key C/R to 0.
+ */
+ nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
+ page_set_storage_key(address, nkey, 0);
#endif
}
@@ -1099,8 +1109,10 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
if (!mm_exclusive(mm))
__ptep_ipte(address, ptep);
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
+ pgste_set(ptep, pgste);
+ }
return pte;
}
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 29829747725..87acc38f73c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -74,6 +74,8 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
static void show_trace(struct task_struct *task, unsigned long *stack)
{
+ const unsigned long frame_size =
+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
register unsigned long __r15 asm ("15");
unsigned long sp;
@@ -82,11 +84,13 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
sp = task ? task->thread.ksp : __r15;
printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
- sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
- S390_lowcore.panic_stack);
+ sp = __show_trace(sp,
+ S390_lowcore.panic_stack + frame_size - 4096,
+ S390_lowcore.panic_stack + frame_size);
#endif
- sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
+ sp = __show_trace(sp,
+ S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+ S390_lowcore.async_stack + frame_size);
if (task)
__show_trace(sp, (unsigned long) task_stack_page(task),
(unsigned long) task_stack_page(task) + THREAD_SIZE);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f7fb58903f6..408e866ae54 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -311,3 +311,67 @@ void measurement_alert_subclass_unregister(void)
spin_unlock(&ma_subclass_lock);
}
EXPORT_SYMBOL(measurement_alert_subclass_unregister);
+
+void synchronize_irq(unsigned int irq)
+{
+ /*
+ * Not needed, the handler is protected by a lock and IRQs that occur
+ * after the handler is deleted are just NOPs.
+ */
+}
+EXPORT_SYMBOL_GPL(synchronize_irq);
+
+#ifndef CONFIG_PCI
+
+/* Only PCI devices have dynamically-defined IRQ handlers */
+
+int request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long irqflags, const char *devname, void *dev_id)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(free_irq);
+
+void enable_irq(unsigned int irq)
+{
+ WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(enable_irq);
+
+void disable_irq(unsigned int irq)
+{
+ WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(disable_irq);
+
+#endif /* !CONFIG_PCI */
+
+void disable_irq_nosync(unsigned int irq)
+{
+ disable_irq(irq);
+}
+EXPORT_SYMBOL_GPL(disable_irq_nosync);
+
+unsigned long probe_irq_on(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_on);
+
+int probe_irq_off(unsigned long val)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_off);
+
+unsigned int probe_irq_mask(unsigned long val)
+{
+ return val;
+}
+EXPORT_SYMBOL_GPL(probe_irq_mask);
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index b6506ee32a3..29bd7bec417 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -225,7 +225,7 @@ _sclp_print:
ahi %r2,1
ltr %r0,%r0 # end of string?
jz .LfinalizemtoS4
- chi %r0,0x15 # end of line (NL)?
+ chi %r0,0x0a # end of line (NL)?
jz .LfinalizemtoS4
stc %r0,0(%r6,%r7) # copy to mto
la %r11,0(%r6,%r7)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e6f15b5d8b7..f1e5be85d59 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -302,15 +302,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
return rc;
}
-void synchronize_irq(unsigned int irq)
-{
- /*
- * Not needed, the handler is protected by a lock and IRQs that occur
- * after the handler is deleted are just NOPs.
- */
-}
-EXPORT_SYMBOL_GPL(synchronize_irq);
-
void enable_irq(unsigned int irq)
{
struct msi_desc *msi = irq_get_msi_desc(irq);
@@ -327,30 +318,6 @@ void disable_irq(unsigned int irq)
}
EXPORT_SYMBOL_GPL(disable_irq);
-void disable_irq_nosync(unsigned int irq)
-{
- disable_irq(irq);
-}
-EXPORT_SYMBOL_GPL(disable_irq_nosync);
-
-unsigned long probe_irq_on(void)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_on);
-
-int probe_irq_off(unsigned long val)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_off);
-
-unsigned int probe_irq_mask(unsigned long val)
-{
- return val;
-}
-EXPORT_SYMBOL_GPL(probe_irq_mask);
-
void pcibios_fixup_bus(struct pci_bus *bus)
{
}
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 9f20566b077..79cc0d1a477 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -54,6 +54,7 @@ EXPORT_SYMBOL(of_set_property_mutex);
int of_set_property(struct device_node *dp, const char *name, void *val, int len)
{
struct property **prevp;
+ unsigned long flags;
void *new_val;
int err;
@@ -64,7 +65,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
err = -ENODEV;
mutex_lock(&of_set_property_mutex);
- raw_spin_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
prevp = &dp->properties;
while (*prevp) {
struct property *prop = *prevp;
@@ -91,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
}
prevp = &(*prevp)->next;
}
- raw_spin_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
mutex_unlock(&of_set_property_mutex);
/* XXX Update procfs if necessary... */
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 35ee62fccf9..c205035a6b9 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
*size = len;
}
-static efi_status_t setup_efi_vars(struct boot_params *params)
-{
- struct setup_data *data;
- struct efi_var_bootdata *efidata;
- u64 store_size, remaining_size, var_size;
- efi_status_t status;
-
- if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
- return EFI_UNSUPPORTED;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
- EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
- &remaining_size, &var_size);
-
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, sizeof(*efidata), &efidata);
-
- if (status != EFI_SUCCESS)
- return status;
-
- efidata->data.type = SETUP_EFI_VARS;
- efidata->data.len = sizeof(struct efi_var_bootdata) -
- sizeof(struct setup_data);
- efidata->data.next = 0;
- efidata->store_size = store_size;
- efidata->remaining_size = remaining_size;
- efidata->max_var_size = var_size;
-
- if (data)
- data->next = (unsigned long)efidata;
- else
- params->hdr.setup_data = (unsigned long)efidata;
-
-}
-
static efi_status_t setup_efi_pci(struct boot_params *params)
{
efi_pci_io_protocol *pci;
@@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
setup_graphics(boot_params);
- setup_efi_vars(boot_params);
-
setup_efi_pci(boot_params);
status = efi_call_phys3(sys_table->boottime->allocate_pool,
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 2fb5d5884e2..60c89f30c72 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
-struct efi_var_bootdata {
- struct setup_data data;
- u64 store_size;
- u64 remaining_size;
- u64 max_var_size;
-};
-
#ifdef CONFIG_EFI
static inline bool efi_is_native(void)
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 08744242b8d..c15ddaf9071 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -6,7 +6,6 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
-#define SETUP_EFI_VARS 4
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 7a6f3b3be3c..f2bb9c96720 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -160,7 +160,7 @@ identity_mapped:
xorq %rbp, %rbp
xorq %r8, %r8
xorq %r9, %r9
- xorq %r10, %r9
+ xorq %r10, %r10
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index eaac1743def..1f34e921977 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -277,6 +277,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
end_pfn = limit_pfn;
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+ if (!after_bootmem)
+ adjust_range_page_size_mask(mr, nr_range);
+
/* try to merge same page size and continuous */
for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
unsigned long old_start;
@@ -291,9 +294,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
nr_range--;
}
- if (!after_bootmem)
- adjust_range_page_size_mask(mr, nr_range);
-
for (i = 0; i < nr_range; i++)
printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
mr[i].start, mr[i].end - 1,
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 82089d8b195..5ae2eb09419 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -42,7 +42,6 @@
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>
-#include <linux/ucs2_string.h>
#include <asm/setup.h>
#include <asm/efi.h>
@@ -54,12 +53,12 @@
#define EFI_DEBUG 1
-/*
- * There's some additional metadata associated with each
- * variable. Intel's reference implementation is 60 bytes - bump that
- * to account for potential alignment constraints
- */
-#define VAR_METADATA_SIZE 64
+#define EFI_MIN_RESERVE 5120
+
+#define EFI_DUMMY_GUID \
+ EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
+
+static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
struct efi __read_mostly efi = {
.mps = EFI_INVALID_TABLE_ADDR,
@@ -79,13 +78,6 @@ struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
-static u64 efi_var_store_size;
-static u64 efi_var_remaining_size;
-static u64 efi_var_max_var_size;
-static u64 boot_used_size;
-static u64 boot_var_size;
-static u64 active_size;
-
unsigned long x86_efi_facility;
/*
@@ -188,53 +180,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
- efi_status_t status;
- static bool finished = false;
- static u64 var_size;
-
- status = efi_call_virt3(get_next_variable,
- name_size, name, vendor);
-
- if (status == EFI_NOT_FOUND) {
- finished = true;
- if (var_size < boot_used_size) {
- boot_var_size = boot_used_size - var_size;
- active_size += boot_var_size;
- } else {
- printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
- }
- }
-
- if (boot_used_size && !finished) {
- unsigned long size = 0;
- u32 attr;
- efi_status_t s;
- void *tmp;
-
- s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
-
- if (s != EFI_BUFFER_TOO_SMALL || !size)
- return status;
-
- tmp = kmalloc(size, GFP_ATOMIC);
-
- if (!tmp)
- return status;
-
- s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
-
- if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
- var_size += size;
- var_size += ucs2_strsize(name, 1024);
- active_size += size;
- active_size += VAR_METADATA_SIZE;
- active_size += ucs2_strsize(name, 1024);
- }
-
- kfree(tmp);
- }
-
- return status;
+ return efi_call_virt3(get_next_variable,
+ name_size, name, vendor);
}
static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -243,34 +190,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
unsigned long data_size,
void *data)
{
- efi_status_t status;
- u32 orig_attr = 0;
- unsigned long orig_size = 0;
-
- status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
- NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL)
- orig_size = 0;
-
- status = efi_call_virt5(set_variable,
- name, vendor, attr,
- data_size, data);
-
- if (status == EFI_SUCCESS) {
- if (orig_size) {
- active_size -= orig_size;
- active_size -= ucs2_strsize(name, 1024);
- active_size -= VAR_METADATA_SIZE;
- }
- if (data_size) {
- active_size += data_size;
- active_size += ucs2_strsize(name, 1024);
- active_size += VAR_METADATA_SIZE;
- }
- }
-
- return status;
+ return efi_call_virt5(set_variable,
+ name, vendor, attr,
+ data_size, data);
}
static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -786,9 +708,6 @@ void __init efi_init(void)
char vendor[100] = "unknown";
int i = 0;
void *tmp;
- struct setup_data *data;
- struct efi_var_bootdata *efi_var_data;
- u64 pa_data;
#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||
@@ -806,22 +725,6 @@ void __init efi_init(void)
if (efi_systab_init(efi_phys.systab))
return;
- pa_data = boot_params.hdr.setup_data;
- while (pa_data) {
- data = early_ioremap(pa_data, sizeof(*efi_var_data));
- if (data->type == SETUP_EFI_VARS) {
- efi_var_data = (struct efi_var_bootdata *)data;
-
- efi_var_store_size = efi_var_data->store_size;
- efi_var_remaining_size = efi_var_data->remaining_size;
- efi_var_max_var_size = efi_var_data->max_var_size;
- }
- pa_data = data->next;
- early_iounmap(data, sizeof(*efi_var_data));
- }
-
- boot_used_size = efi_var_store_size - efi_var_remaining_size;
-
set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
/*
@@ -1085,6 +988,13 @@ void __init efi_enter_virtual_mode(void)
runtime_code_page_mkexec();
kfree(new_memmap);
+
+ /* clean DUMMY object */
+ efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ 0, NULL);
}
/*
@@ -1136,33 +1046,65 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
efi_status_t status;
u64 storage_size, remaining_size, max_size;
+ if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
+ return 0;
+
status = efi.query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);
if (status != EFI_SUCCESS)
return status;
- if (!max_size && remaining_size > size)
- printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
- " is returning MaxVariableSize=0\n");
/*
* Some firmware implementations refuse to boot if there's insufficient
* space in the variable store. We account for that by refusing the
* write if permitting it would reduce the available space to under
- * 50%. However, some firmware won't reclaim variable space until
- * after the used (not merely the actively used) space drops below
- * a threshold. We can approximate that case with the value calculated
- * above. If both the firmware and our calculations indicate that the
- * available space would drop below 50%, refuse the write.
+ * 5KB. This figure was provided by Samsung, so should be safe.
*/
+ if ((remaining_size - size < EFI_MIN_RESERVE) &&
+ !efi_no_storage_paranoia) {
+
+ /*
+ * Triggering garbage collection may require that the firmware
+ * generate a real EFI_OUT_OF_RESOURCES error. We can force
+ * that by attempting to use more space than is available.
+ */
+ unsigned long dummy_size = remaining_size + 1024;
+ void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
+
+ status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ dummy_size, dummy);
+
+ if (status == EFI_SUCCESS) {
+ /*
+ * This should have failed, so if it didn't, make sure
+ * that we delete it...
+ */
+ efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ 0, dummy);
+ }
- if (!storage_size || size > remaining_size ||
- (max_size && size > max_size))
- return EFI_OUT_OF_RESOURCES;
+ /*
+ * The runtime code may now have triggered a garbage collection
+ * run, so check the variable info again
+ */
+ status = efi.query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
- if (!efi_no_storage_paranoia &&
- ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
- (remaining_size - size < storage_size / 2)))
- return EFI_OUT_OF_RESOURCES;
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /*
+ * There still isn't enough room, so return an error
+ */
+ if (remaining_size - size < EFI_MIN_RESERVE)
+ return EFI_OUT_OF_RESOURCES;
+ }
return EFI_SUCCESS;
}
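efi_query_variable_store() is meant to be called before any non-volatile SetVariable attempt. A hedged sketch of that caller-side contract (the helper name checked_set_variable is made up for illustration, and the declaration is assumed to come from linux/efi.h):

#include <linux/efi.h>

static efi_status_t checked_set_variable(efi_char16_t *name, efi_guid_t *vendor,
					 u32 attr, unsigned long size, void *data)
{
	efi_status_t status = efi_query_variable_store(attr, size);

	if (status != EFI_SUCCESS)
		return status;   /* e.g. EFI_OUT_OF_RESOURCES once under the 5KB reserve */

	return efi.set_variable(name, vendor, attr, size, data);
}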
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 590be109089..f7bab68a4b8 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -42,9 +42,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"^(xen_irq_disable_direct_reloc$|"
"xen_save_fl_direct_reloc$|"
"VDSO|"
-#if ELF_BITS == 64
- "__vvar_page|"
-#endif
"__crc_)",
/*
@@ -72,6 +69,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"__per_cpu_load|"
"init_per_cpu__.*|"
"__end_rodata_hpage_align|"
+ "__vvar_page|"
#endif
"_end)$"
};
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index fb44426fe93..d99cae8147d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
+#include <linux/tick.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
@@ -447,6 +448,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
cpu_bringup();
+ /*
+ * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
+ * clears certain data that the cpu_idle loop (which called us
+ * and that we return from) expects. The only way to get that
+ * data back is to call:
+ */
+ tick_nohz_idle_enter();
}
#else /* !CONFIG_HOTPLUG_CPU */
diff --git a/block/blk-core.c b/block/blk-core.c
index 33c33bc99dd..d5745b5833c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3164,7 +3164,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
q->rpm_status = RPM_ACTIVE;
__blk_run_queue(q);
pm_runtime_mark_last_busy(q->dev);
- pm_runtime_autosuspend(q->dev);
+ pm_request_autosuspend(q->dev);
} else {
q->rpm_status = RPM_SUSPENDED;
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 622d8a48cbe..bf8148e74e7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -823,6 +823,7 @@ config CRYPTO_BLOWFISH_X86_64
config CRYPTO_BLOWFISH_AVX2_X86_64
tristate "Blowfish cipher algorithm (x86_64/AVX2)"
depends on X86 && 64BIT
+ depends on BROKEN
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
@@ -1299,6 +1300,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
config CRYPTO_TWOFISH_AVX2_X86_64
tristate "Twofish cipher algorithm (x86_64/AVX2)"
depends on X86 && 64BIT
+ depends on BROKEN
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 403baf4dffc..fcd7d91cec3 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -919,13 +919,14 @@ static int ghes_probe(struct platform_device *ghes_dev)
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
/* External interrupt vector is GSI */
- if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
+ rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
+ if (rc) {
pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
generic->header.source_id);
goto err_edac_unreg;
}
- if (request_irq(ghes->irq, ghes_irq_func,
- 0, "GHES IRQ", ghes)) {
+ rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
+ if (rc) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
goto err_edac_unreg;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index bc493aa3af1..318fa32a141 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -278,11 +278,13 @@ int acpi_bus_init_power(struct acpi_device *device)
if (result)
return result;
} else if (state == ACPI_STATE_UNKNOWN) {
- /* No power resources and missing _PSC? Try to force D0. */
+ /*
+ * No power resources and missing _PSC? Cross fingers and make
+ * it D0 in hope that this is what the BIOS put the device into.
+ * [We tried to force D0 here by executing _PS0, but that broke
+ * Toshiba P870-303 in a nasty way.]
+ */
state = ACPI_STATE_D0;
- result = acpi_dev_pm_explicit_set(device, state);
- if (result)
- return result;
}
device->power.state = state;
return 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 44225cb15f3..b14ac46948c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1017,11 +1017,8 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
return -ENOSYS;
result = driver->ops.add(device);
- if (result) {
- device->driver = NULL;
- device->driver_data = NULL;
+ if (result)
return result;
- }
device->driver = driver;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5b32e15a65c..440eadf2d32 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -458,12 +458,28 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion g6 Notebook PC",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+ },
+ },
+ {
+ .callback = video_ignore_initial_backlight,
.ident = "HP 1000 Notebook PC",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
},
},
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion m4",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
+ },
+ },
{}
};
@@ -1706,6 +1722,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
int error;
acpi_status status;
+ if (device->handler)
+ return -EINVAL;
+
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
device->parent->handle, 1,
acpi_video_bus_match, NULL,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa0875f6f1b..02f490bad30 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
int registers = 0;
int this_registers, average;
- map->lock(map);
+ map->lock(map->lock_arg);
mem_size = sizeof(*rbtree_ctx);
mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
@@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
nodes, registers, average, mem_size);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return 0;
}
@@ -391,8 +391,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
- if (rbnode->base_reg < min)
- continue;
if (rbnode->base_reg > max)
break;
if (rbnode->base_reg + rbnode->blklen < min)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 75923f2396b..507ee2da0f6 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map)
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
- map->lock(map);
+ map->lock(map->lock_arg);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
dev_dbg(map->dev, "Syncing %s cache\n",
@@ -306,7 +306,7 @@ out:
trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
map->cache_bypass = bypass;
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
- map->lock(map);
+ map->lock(map->lock_arg);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
@@ -352,7 +352,7 @@ out:
trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
*/
void regcache_cache_only(struct regmap *map, bool enable)
{
- map->lock(map);
+ map->lock(map->lock_arg);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map->dev, enable);
- map->unlock(map);
+ map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
*/
void regcache_mark_dirty(struct regmap *map)
{
- map->lock(map);
+ map->lock(map->lock_arg);
map->cache_dirty = true;
- map->unlock(map);
+ map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
@@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
*/
void regcache_cache_bypass(struct regmap *map, bool enable)
{
- map->lock(map);
+ map->lock(map->lock_arg);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
trace_regmap_cache_bypass(map->dev, enable);
- map->unlock(map);
+ map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
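The regcache hunks above switch every map->lock()/map->unlock() call to pass map->lock_arg, because a driver-supplied lock callback receives that opaque argument rather than the regmap itself. A hedged sketch of such a configuration (device specifics invented for illustration):

#include <linux/mutex.h>
#include <linux/regmap.h>

static DEFINE_MUTEX(my_bus_mutex);

static void my_regmap_lock(void *lock_arg)
{
	mutex_lock(lock_arg);            /* lock_arg is &my_bus_mutex below */
}

static void my_regmap_unlock(void *lock_arg)
{
	mutex_unlock(lock_arg);
}

static const struct regmap_config my_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.lock     = my_regmap_lock,
	.unlock   = my_regmap_unlock,
	.lock_arg = &my_bus_mutex,
};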
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 23b701f5fd2..975719bc345 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -265,6 +265,7 @@ static ssize_t regmap_map_write_file(struct file *file,
char *start = buf;
unsigned long reg, value;
struct regmap *map = file->private_data;
+ int ret;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
@@ -282,7 +283,9 @@ static ssize_t regmap_map_write_file(struct file *file,
/* Userspace has been fiddling around behind the kernel's back */
add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
- regmap_write(map, reg, value);
+ ret = regmap_write(map, reg, value);
+ if (ret < 0)
+ return ret;
return buf_size;
}
#else
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 6374dc10352..62b6c2cc80b 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
static void cciss_release(struct gendisk *disk, fmode_t mode);
-static int do_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = {
.owner = THIS_MODULE,
.open = cciss_unlocked_open,
.release = cciss_release,
- .ioctl = do_ioctl,
+ .ioctl = cciss_ioctl,
.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = cciss_compat_ioctl,
@@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode)
mutex_unlock(&cciss_mutex);
}
-static int do_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- int ret;
- mutex_lock(&cciss_mutex);
- ret = cciss_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&cciss_mutex);
- return ret;
-}
-
#ifdef CONFIG_COMPAT
static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
@@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
case CCISS_REGNEWD:
case CCISS_RESCANDISK:
case CCISS_GETLUNINFO:
- return do_ioctl(bdev, mode, cmd, arg);
+ return cciss_ioctl(bdev, mode, cmd, arg);
case CCISS_PASSTHRU32:
return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
@@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
if (err)
return -EFAULT;
- err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
+ err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
if (err)
return err;
err |=
@@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
if (err)
return -EFAULT;
- err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
+ err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
if (err)
return err;
err |=
@@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
{
cciss_coalint_struct intinfo;
+ unsigned long flags;
if (!argp)
return -EINVAL;
+ spin_lock_irqsave(&h->lock, flags);
intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
+ spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user
(argp, &intinfo, sizeof(cciss_coalint_struct)))
return -EFAULT;
@@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
{
NodeName_type NodeName;
+ unsigned long flags;
int i;
if (!argp)
return -EINVAL;
+ spin_lock_irqsave(&h->lock, flags);
for (i = 0; i < 16; i++)
NodeName[i] = readb(&h->cfgtable->ServerName[i]);
+ spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
return -EFAULT;
return 0;
@@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
{
Heartbeat_type heartbeat;
+ unsigned long flags;
if (!argp)
return -EINVAL;
+ spin_lock_irqsave(&h->lock, flags);
heartbeat = readl(&h->cfgtable->HeartBeat);
+ spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
return -EFAULT;
return 0;
@@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
{
BusTypes_type BusTypes;
+ unsigned long flags;
if (!argp)
return -EINVAL;
+ spin_lock_irqsave(&h->lock, flags);
BusTypes = readl(&h->cfgtable->BusTypes);
+ spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
return -EFAULT;
return 0;
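
The cciss hunks drop the coarse do_ioctl()/cciss_mutex wrapper in favour of calling cciss_ioctl() directly, and instead take the controller's spinlock with interrupts saved around the short readl()/readb() sequences that snapshot the config table. A reduced sketch of that locking pattern, with hypothetical structure and register offsets:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ctlr {
        spinlock_t lock;
        void __iomem *regs;
};

/* Sketch: snapshot a few MMIO registers consistently under one spinlock. */
static void snapshot_regs(struct ctlr *h, u32 *delay, u32 *count)
{
        unsigned long flags;

        spin_lock_irqsave(&h->lock, flags);     /* also masks local IRQs */
        *delay = readl(h->regs + 0x10);         /* offsets are illustrative */
        *count = readl(h->regs + 0x14);
        spin_unlock_irqrestore(&h->lock, flags);
}
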
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 847107ef0cc..20dd52a2f92 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
- debugfs_remove_recursive(dd->dfs_node);
+ if (dd->dfs_node)
+ debugfs_remove_recursive(dd->dfs_node);
}
@@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
struct driver_data *dd = queue->queuedata;
struct scatterlist *sg;
struct bio_vec *bvec;
- int nents = 0;
+ int i, nents = 0;
int tag = 0, unaligned = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
/* Create the scatter list for this bio. */
- bio_for_each_segment(bvec, bio, nents) {
+ bio_for_each_segment(bvec, bio, i) {
sg_set_page(&sg[nents],
bvec->bv_page,
bvec->bv_len,
bvec->bv_offset);
+ nents++;
}
/* Issue the read/write. */
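
In the mtip32xx hunk, bio_for_each_segment() gets its own iterator `i`, while `nents` is kept as a separately incremented count of scatterlist entries actually filled; the macro owns its index and need not start it at zero, so it cannot double as the output position. The same input-cursor versus output-counter separation in an isolated, illustrative loop:

/* Sketch: the input cursor and the output position are tracked independently,
 * so the copy stays correct even when iteration starts at a non-zero offset. */
int compact_from(const int *src, int start, int n, int *dst)
{
        int i, nents = 0;

        for (i = start; i < n; i++) {   /* input index may not begin at 0 */
                dst[nents] = src[i];    /* output index always begins at 0 */
                nents++;
        }
        return nents;                   /* number of entries written */
}
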
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efdfaa44a5..ce79a590b45 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
struct nvme_command *cmnd;
struct nvme_iod *iod;
enum dma_data_direction dma_dir;
- int cmdid, length, result = -ENOMEM;
+ int cmdid, length, result;
u16 control;
u32 dsmgmt;
int psegs = bio_phys_segments(ns->queue, bio);
@@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return result;
}
+ result = -ENOMEM;
iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
if (!iod)
goto nomem;
@@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
if (timeout && !time_after(now, info[cmdid].timeout))
continue;
+ if (info[cmdid].ctx == CMD_CTX_CANCELLED)
+ continue;
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
fn(nvmeq->dev, ctx, &cqe);
@@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
if (addr & 3)
return ERR_PTR(-EINVAL);
- if (!length)
+ if (!length || length > INT_MAX - PAGE_SIZE)
return ERR_PTR(-EINVAL);
offset = offset_in_page(addr);
@@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
sg_init_table(sg, count);
for (i = 0; i < count; i++) {
sg_set_page(&sg[i], pages[i],
- min_t(int, length, PAGE_SIZE - offset), offset);
+ min_t(unsigned, length, PAGE_SIZE - offset),
+ offset);
length -= (PAGE_SIZE - offset);
offset = 0;
}
@@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
nvme_free_iod(dev, iod);
}
- if (!status && copy_to_user(&ucmd->result, &cmd.result,
+ if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
sizeof(cmd.result)))
status = -EFAULT;
@@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
+ struct pci_dev *pdev = dev->pci_dev;
+ int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
nr_io_queues = num_online_cpus();
result = set_queue_count(dev, nr_io_queues);
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < nr_io_queues)
nr_io_queues = result;
+ q_count = nr_io_queues;
/* Deregister the admin queue's interrupt */
free_irq(dev->entry[0].vector, dev->queues[0]);
db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
if (db_bar_size > 8192) {
iounmap(dev->bar);
- dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
- db_bar_size);
+ dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
dev->queues[0]->q_db = dev->dbs;
}
@@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
for (i = 0; i < nr_io_queues; i++)
dev->entry[i].entry = i;
for (;;) {
- result = pci_enable_msix(dev->pci_dev, dev->entry,
- nr_io_queues);
+ result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
if (result == 0) {
break;
} else if (result > 0) {
nr_io_queues = result;
continue;
} else {
- nr_io_queues = 1;
+ nr_io_queues = 0;
break;
}
}
+ if (nr_io_queues == 0) {
+ nr_io_queues = q_count;
+ for (;;) {
+ result = pci_enable_msi_block(pdev, nr_io_queues);
+ if (result == 0) {
+ for (i = 0; i < nr_io_queues; i++)
+ dev->entry[i].vector = i + pdev->irq;
+ break;
+ } else if (result > 0) {
+ nr_io_queues = result;
+ continue;
+ } else {
+ nr_io_queues = 1;
+ break;
+ }
+ }
+ }
+
result = queue_request_irq(dev, dev->queues[0], "nvme admin");
/* XXX: handle failure here */
@@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
nvme_dev_remove(dev);
- pci_disable_msix(dev->pci_dev);
+ if (dev->pci_dev->msi_enabled)
+ pci_disable_msi(dev->pci_dev);
+ else if (dev->pci_dev->msix_enabled)
+ pci_disable_msix(dev->pci_dev);
iounmap(dev->bar);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
@@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
pci_set_drvdata(pdev, dev);
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ else
+ goto disable;
+
result = nvme_set_instance(dev);
if (result)
goto disable;
@@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
unmap:
iounmap(dev->bar);
disable_msix:
- pci_disable_msix(pdev);
+ if (dev->pci_dev->msi_enabled)
+ pci_disable_msi(dev->pci_dev);
+ else if (dev->pci_dev->msix_enabled)
+ pci_disable_msix(dev->pci_dev);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
disable:
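
Two fallback patterns run through the nvme-core hunks: if MSI-X cannot be enabled the driver retries with plain MSI before limping along with one queue, and the 64-bit DMA mask is only kept when the platform accepts it, otherwise probe falls back to 32-bit or fails. The DMA-mask half of that, as a sketch against the 3.x-era API (coherent-mask return value ignored for brevity, as in the hunk above):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: prefer a 64-bit DMA mask, fall back to 32-bit, else fail probe. */
static int example_set_dma_mask(struct pci_dev *pdev)
{
        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                return 0;
        }
        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
                return 0;
        }
        return -ENODEV;         /* no usable DMA configuration */
}
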
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index fed54b03989..102de2f52b5 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -44,7 +44,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>
@@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
}
}
-static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *mode_page, u8 page_code)
{
int res = SNTI_TRANSLATION_SUCCESS;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3c08983e600..f5d0ea11d9f 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -83,7 +83,8 @@
#define MAX_SPEED 0xffff
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
+#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
+ ~(sector_t)((pd)->settings.size - 1))
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
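
The ZONE() change widens the mask to sector_t before the bitwise AND; otherwise `(pd)->settings.size - 1` is computed at int width, its complement loses the high bits, and large sector numbers are silently truncated. A compact userspace demonstration of that truncation, with u64 standing in for sector_t:

#include <stdint.h>
#include <stdio.h>

/* Sketch: masking a 64-bit sector with an int-width mask drops high bits. */
int main(void)
{
        uint64_t sector = 0x200000000ULL;       /* needs more than 32 bits */
        uint32_t size = 128;                    /* zone size, power of two */

        uint64_t bad  = sector & ~(size - 1);            /* 32-bit mask */
        uint64_t good = sector & ~(uint64_t)(size - 1);  /* widened first */

        printf("bad=%#llx good=%#llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
}
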
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d6d314027b5..3063452e55d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -519,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = {
};
/*
- * Initialize an rbd client instance.
- * We own *ceph_opts.
+ * Initialize an rbd client instance. Success or not, this function
+ * consumes ceph_opts.
*/
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
@@ -675,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private)
/*
* Get a ceph client with specific addr and configuration, if one does
- * not exist create it.
+ * not exist create it. Either way, ceph_opts is consumed by this
+ * function.
*/
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
@@ -4697,8 +4698,10 @@ out:
return ret;
}
-/* Undo whatever state changes are made by v1 or v2 image probe */
-
+/*
+ * Undo whatever state changes are made by v1 or v2 header info
+ * call.
+ */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
@@ -4902,9 +4905,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
int tmp;
/*
- * Get the id from the image id object. If it's not a
- * format 2 image, we'll get ENOENT back, and we'll assume
- * it's a format 1 image.
+ * Get the id from the image id object. Unless there's an
+ * error, rbd_dev->spec->image_id will be filled in with
+ * a dynamically-allocated string, and rbd_dev->image_format
+ * will be set to either 1 or 2.
*/
ret = rbd_dev_image_id(rbd_dev);
if (ret)
@@ -4992,7 +4996,6 @@ static ssize_t rbd_add(struct bus_type *bus,
rc = PTR_ERR(rbdc);
goto err_out_args;
}
- ceph_opts = NULL; /* rbd_dev client now owns this */
/* pick the pool */
osdc = &rbdc->client->osdc;
@@ -5027,18 +5030,18 @@ static ssize_t rbd_add(struct bus_type *bus,
rbd_dev->mapping.read_only = read_only;
rc = rbd_dev_device_setup(rbd_dev);
- if (!rc)
- return count;
+ if (rc) {
+ rbd_dev_image_release(rbd_dev);
+ goto err_out_module;
+ }
+
+ return count;
- rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
err_out_args:
- if (ceph_opts)
- ceph_destroy_options(ceph_opts);
- kfree(rbd_opts);
rbd_spec_put(spec);
err_out_module:
module_put(THIS_MODULE);
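
The rbd hunks make rbd_client_create()/rbd_get_client() consume ceph_opts whether they succeed or fail, so rbd_add() no longer needs the conditional ceph_destroy_options() in its error unwinding. A sketch of that "consuming constructor" convention, with hypothetical types and helpers:

#include <linux/err.h>
#include <linux/slab.h>

struct options { int placeholder; };            /* hypothetical */
struct client  { struct options *opts; };       /* hypothetical */

static void options_destroy(struct options *opts)
{
        kfree(opts);
}

/* Sketch: a create function that owns "opts" on success and on failure,
 * so the caller never frees it. */
struct client *client_create(struct options *opts)
{
        struct client *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c) {
                options_destroy(opts);  /* consumed even on failure */
                return ERR_PTR(-ENOMEM);
        }
        c->opts = opts;                 /* ownership transferred on success */
        return c;
}
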
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index fdfd61a2d52..11a6104a1e4 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -201,7 +201,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787/8797.
+ Marvell Bluetooth devices, such as 8688/8787/8797/8897.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -214,7 +214,7 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897
chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 3a4343b3bd6..9a9f51875df 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data)
add_wait_queue(&thread->wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ BT_DBG("main_thread: break from main thread");
+ break;
+ }
if (adapter->wakeup_tries ||
((!adapter->int_count) &&
@@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data)
BT_DBG("main_thread woke up");
- if (kthread_should_stop()) {
- BT_DBG("main_thread: break from main thread");
- break;
- }
-
spin_lock_irqsave(&priv->driver_lock, flags);
if (adapter->int_count) {
adapter->int_count = 0;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index c63488c54f4..13693b7a0d5 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -82,6 +82,23 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.io_port_2 = 0x7a,
};
+static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
+ .cfg = 0x00,
+ .host_int_mask = 0x02,
+ .host_intstatus = 0x03,
+ .card_status = 0x50,
+ .sq_read_base_addr_a0 = 0x60,
+ .sq_read_base_addr_a1 = 0x61,
+ .card_revision = 0xbc,
+ .card_fw_status0 = 0xc0,
+ .card_fw_status1 = 0xc1,
+ .card_rx_len = 0xc2,
+ .card_rx_unit = 0xc3,
+ .io_port_0 = 0xd8,
+ .io_port_1 = 0xd9,
+ .io_port_2 = 0xda,
+};
+
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
@@ -103,6 +120,13 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.sd_blksz_fw_dl = 256,
};
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8897_uapsta.bin",
+ .reg = &btmrvl_reg_88xx,
+ .sd_blksz_fw_dl = 256,
+};
+
static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8688 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
@@ -116,6 +140,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8797 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
+ /* Marvell SD8897 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
{ } /* Terminating entry */
};
@@ -1194,3 +1221,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 11b8b4b54ce..edc089e9d0c 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -347,11 +347,11 @@ static u32 get_cur_val(const struct cpumask *mask)
switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
break;
case SYSTEM_AMD_MSR_CAPABLE:
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index a64eb8b7044..ad1fde27766 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -45,7 +45,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
struct opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
- long freq_Hz;
+ long freq_Hz, freq_exact;
unsigned int index;
int ret;
@@ -60,6 +60,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
+ freq_exact = freq_Hz;
freqs.new = freq_Hz / 1000;
freqs.old = clk_get_rate(cpu_clk) / 1000;
@@ -98,7 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
}
}
- ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+ ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
pr_err("failed to set clock rate: %d\n", ret);
if (cpu_reg)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5af40ad82d2..dc9b72e25c1 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -26,6 +26,7 @@
#include <linux/tick.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/cpu.h>
#include "cpufreq_governor.h"
@@ -180,8 +181,10 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
if (!all_cpus) {
__gov_queue_work(smp_processor_id(), dbs_data, delay);
} else {
+ get_online_cpus();
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
+ put_online_cpus();
}
}
EXPORT_SYMBOL_GPL(gov_queue_work);
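
The cpufreq_governor hunk brackets the per-CPU work queuing with get_online_cpus()/put_online_cpus(), so a CPU in policy->cpus cannot be unplugged while work is being queued on it. A reduced sketch of the hotplug-read-lock pattern using the 3.x API names, with an illustrative per-CPU work item:

#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct work_struct, example_work);

static void example_work_fn(struct work_struct *work)
{
        /* per-CPU work body would go here */
}

static void example_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                INIT_WORK(&per_cpu(example_work, cpu), example_work_fn);
}

/* Sketch: hold the CPU hotplug read lock while queuing on a CPU mask. */
static void queue_on_mask(const struct cpumask *mask)
{
        int cpu;

        get_online_cpus();              /* blocks CPU hotplug while held */
        for_each_cpu(cpu, mask)
                queue_work_on(cpu, system_wq, &per_cpu(example_work, cpu));
        put_online_cpus();
}
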
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index a97bb6c1596..c3dc1c04a5d 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = {
{ .compatible = "fsl,imx27-sahara" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(platform, sahara_dt_ids);
+MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d8ce4ecfef1..e88ded2c8d2 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -716,8 +716,7 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(done_wait,
- done.done || kthread_should_stop(),
+ wait_event_freezable_timeout(done_wait, done.done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
@@ -997,7 +996,6 @@ static void stop_threaded_test(struct dmatest_info *info)
static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
struct dmatest_params *params = &info->params;
- int ret;
/* Stop any running test first */
__stop_threaded_test(info);
@@ -1012,13 +1010,23 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
memcpy(params, &info->dbgfs_params, sizeof(*params));
/* Run test with new parameters */
- ret = __run_threaded_test(info);
- if (ret) {
- __stop_threaded_test(info);
- pr_err("dmatest: Can't run test\n");
+ return __run_threaded_test(info);
+}
+
+static bool __is_threaded_test_run(struct dmatest_info *info)
+{
+ struct dmatest_chan *dtc;
+
+ list_for_each_entry(dtc, &info->channels, node) {
+ struct dmatest_thread *thread;
+
+ list_for_each_entry(thread, &dtc->threads, node) {
+ if (!thread->done)
+ return true;
+ }
}
- return ret;
+ return false;
}
static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
@@ -1091,22 +1099,10 @@ static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
{
struct dmatest_info *info = file->private_data;
char buf[3];
- struct dmatest_chan *dtc;
- bool alive = false;
mutex_lock(&info->lock);
- list_for_each_entry(dtc, &info->channels, node) {
- struct dmatest_thread *thread;
-
- list_for_each_entry(thread, &dtc->threads, node) {
- if (!thread->done) {
- alive = true;
- break;
- }
- }
- }
- if (alive) {
+ if (__is_threaded_test_run(info)) {
buf[0] = 'Y';
} else {
__stop_threaded_test(info);
@@ -1132,7 +1128,12 @@ static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
if (strtobool(buf, &bv) == 0) {
mutex_lock(&info->lock);
- ret = __restart_threaded_test(info, bv);
+
+ if (__is_threaded_test_run(info))
+ ret = -EBUSY;
+ else
+ ret = __restart_threaded_test(info, bv);
+
mutex_unlock(&info->lock);
}
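
With the dmatest changes above, dtf_write_run() checks __is_threaded_test_run() and returns -EBUSY rather than restarting a test that is still executing; the check and the restart only stay consistent because both happen under info->lock. A minimal sketch of that check-then-act rule, with a hypothetical state structure and restart helper:

#include <linux/errno.h>
#include <linux/mutex.h>

struct test_info {
        struct mutex lock;
        bool running;                   /* hypothetical run flag */
};

static int restart_test(struct test_info *info);       /* hypothetical */

/* Sketch: check state and act on it under one lock, or the check is racy. */
static int restart_if_idle(struct test_info *info)
{
        int ret;

        mutex_lock(&info->lock);
        if (info->running)
                ret = -EBUSY;           /* refuse to restart a live test */
        else
                ret = restart_test(info);
        mutex_unlock(&info->lock);
        return ret;
}
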
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1734feec47b..71bf4ec300e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1566,10 +1566,12 @@ static void dma_tc_handle(struct d40_chan *d40c)
return;
}
- if (d40_queue_start(d40c) == NULL)
+ if (d40_queue_start(d40c) == NULL) {
d40c->busy = false;
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
+
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
d40_desc_remove(d40d);
d40_desc_done(d40c, d40d);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a6a8643a6a7..8bcce7866d3 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1054,7 +1054,7 @@ EXPORT_SYMBOL(drm_vblank_off);
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
- /* vblank is not initialized (IRQ not installed ?) */
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
/*
@@ -1076,6 +1076,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 3cfd0931fbf..82430ad8ba6 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
size_t addr = 0;
struct gtt_range *gt;
struct drm_gem_object *obj;
- int ret;
+ int ret = 0;
/* if we want to turn of the cursor ignore width and height */
if (!handle) {
@@ -1499,7 +1499,8 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "buffer is to small\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unref_cursor;
}
gt = container_of(obj, struct gtt_range, gem);
@@ -1508,7 +1509,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = psb_gtt_pin(gt);
if (ret) {
dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- return ret;
+ goto unref_cursor;
}
addr = gt->offset; /* Or resource.start ??? */
@@ -1532,9 +1533,14 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = obj;
}
- return 0;
+
+ psb_intel_crtc->cursor_obj = obj;
+ return ret;
+
+unref_cursor:
+ drm_gem_object_unreference(obj);
+ return ret;
}
static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1750,6 +1756,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
kfree(psb_intel_crtc);
}
+static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
+{
+ struct gtt_range *gt;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ gt = to_psb_fb(crtc->fb)->gtt;
+ psb_gtt_unpin(gt);
+ }
+}
+
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.dpms = cdv_intel_crtc_dpms,
.mode_fixup = cdv_intel_crtc_mode_fixup,
@@ -1757,6 +1776,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.mode_set_base = cdv_intel_pipe_set_base,
.prepare = cdv_intel_crtc_prepare,
.commit = cdv_intel_crtc_commit,
+ .disable = cdv_intel_crtc_disable,
};
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1534e220097..8b1b6d923ab 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address;
int ret;
unsigned long pfn;
- /* FIXME: assumes fb at stolen base which may not be true */
- unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
+ unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
+ psbfb->gtt->offset;
page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6e8f42b61ff..6666493789d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
struct drm_gem_object *obj;
void *tmp_dst, *tmp_src;
- int ret, i, cursor_pages;
+ int ret = 0, i, cursor_pages;
/* if we want to turn of the cursor ignore width and height */
if (!handle) {
@@ -880,7 +880,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "buffer is to small\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unref_cursor;
}
gt = container_of(obj, struct gtt_range, gem);
@@ -889,13 +890,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = psb_gtt_pin(gt);
if (ret) {
dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- return ret;
+ goto unref_cursor;
}
if (dev_priv->ops->cursor_needs_phys) {
if (cursor_gt == NULL) {
dev_err(dev->dev, "No hardware cursor mem available");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unref_cursor;
}
/* Prevent overflow */
@@ -936,9 +938,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = obj;
}
- return 0;
+
+ psb_intel_crtc->cursor_obj = obj;
+ return ret;
+
+unref_cursor:
+ drm_gem_object_unreference(obj);
+ return ret;
}
static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1150,6 +1157,19 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
kfree(psb_intel_crtc);
}
+static void psb_intel_crtc_disable(struct drm_crtc *crtc)
+{
+ struct gtt_range *gt;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ gt = to_psb_fb(crtc->fb)->gtt;
+ psb_gtt_unpin(gt);
+ }
+}
+
const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.dpms = psb_intel_crtc_dpms,
.mode_fixup = psb_intel_crtc_mode_fixup,
@@ -1157,6 +1177,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.mode_set_base = psb_intel_pipe_set_base,
.prepare = psb_intel_crtc_prepare,
.commit = psb_intel_crtc_commit,
+ .disable = psb_intel_crtc_disable,
};
const struct drm_crtc_funcs psb_intel_crtc_funcs = {
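
The two gma500 cursor_set hunks above add an unref_cursor label so the GEM object reference taken earlier in the function is dropped on every failure path, instead of leaking whenever the size check or the pin fails. A generic sketch of that goto-based unwind, with hypothetical types and helpers standing in for the GEM calls:

#include <linux/errno.h>
#include <linux/types.h>

#define MIN_CURSOR_BYTES 4096                   /* illustrative */

struct object { size_t size; };                 /* hypothetical */
struct device_state { struct object *cursor; }; /* hypothetical */

static void object_get(struct object *obj);     /* hypothetical */
static void object_put(struct object *obj);     /* hypothetical */
static int pin_object(struct object *obj);      /* hypothetical */

/* Sketch: take a reference early, and release it on every failure path. */
static int set_cursor_object(struct device_state *st, struct object *obj)
{
        int ret = 0;

        object_get(obj);                /* reference held from here on */

        if (obj->size < MIN_CURSOR_BYTES) {
                ret = -ENOMEM;
                goto unref;             /* error: drop the reference */
        }

        ret = pin_object(obj);
        if (ret)
                goto unref;

        st->cursor = obj;               /* success: state keeps the ref */
        return 0;

unref:
        object_put(obj);
        return ret;
}
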
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6cf8e84397..970ad17c99a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -91,14 +91,11 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
-#define EXIT_COND (!i915_reset_in_progress(error))
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+ i915_terminally_wedged(error))
if (EXIT_COND)
return 0;
- /* GPU is already declared terminally dead, give up. */
- if (i915_terminally_wedged(error))
- return -EIO;
-
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ad1117bebd7..56746dcac40 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7937,6 +7937,11 @@ intel_modeset_check_state(struct drm_device *dev)
memset(&pipe_config, 0, sizeof(pipe_config));
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
+
+ /* hw state is inconsistent with the pipe A quirk */
+ if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ active = crtc->active;
+
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f36f1baabd5..29412cc89c7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -815,10 +815,10 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .ident = "Hewlett-Packard HP t5740",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
},
},
{
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d15428404b9..d4ea6c265ce 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1776,11 +1776,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->i2c);
- if (list_empty(&connector->probed_modes) == false)
- goto end;
+ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
- /* Fetch modes from VBT */
+ /*
+ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+ * SDVO->LVDS transcoders can't cope with the EDID mode. Since
+ * drm_mode_probed_add adds the mode at the head of the list we add it
+ * last.
+ */
if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
newmode = drm_mode_duplicate(connector->dev,
dev_priv->sdvo_lvds_vbt_mode);
@@ -1792,7 +1795,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
}
}
-end:
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
@@ -2790,12 +2792,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
- /* Only enable the hotplug irq if we need it, to work around noisy
- * hotplug lines.
- */
- if (intel_sdvo->hotplug_active)
- intel_encoder->hpd_pin = HPD_SDVO_B ? HPD_SDVO_B : HPD_SDVO_C;
-
intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
intel_encoder->mode_set = intel_sdvo_mode_set;
@@ -2814,6 +2810,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
goto err_output;
}
+ /* Only enable the hotplug irq if we need it, to work around noisy
+ * hotplug lines.
+ */
+ if (intel_sdvo->hotplug_active) {
+ intel_encoder->hpd_pin =
+ intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
+ }
+
/*
* Cloning SDVO with anything is often impossible, since the SDVO
* encoder can request a special input timing mode. And even if that's
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 77b8a45fb10..ee66badc8bb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1034,13 +1034,14 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
else
hi_pri_lvl = 5;
- WREG8(0x1fde, 0x06);
- WREG8(0x1fdf, hi_pri_lvl);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
} else {
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
if (mdev->reg_1e24 >= 0x01)
- WREG8(0x1fdf, 0x03);
+ WREG8(MGAREG_CRTCEXT_DATA, 0x03);
else
- WREG8(0x1fdf, 0x04);
+ WREG8(MGAREG_CRTCEXT_DATA, 0x04);
}
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index d0817d94454..f02fd9f443f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -50,11 +50,16 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
{
const u32 doff = (or * 0x800);
int load = -EINVAL;
+ nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
- udelay(9500);
+ mdelay(9);
+ udelay(500);
nv_wr32(priv, 0x61a00c + doff, 0x80000000);
load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
return load;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
index 0d36bdc5141..7fdade6e604 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -55,6 +55,10 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_wr32(priv, 0x616510 + hoff, 0x00000000);
nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
/* ??? */
nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 89bf459d584..e9b8217d007 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -40,14 +40,13 @@
* FIFO channel objects
******************************************************************************/
-void
-nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+static void
+nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_gpuobj *cur;
int i, p;
- mutex_lock(&nv_subdev(priv)->mutex);
cur = priv->playlist[priv->cur_playlist];
priv->cur_playlist = !priv->cur_playlist;
@@ -61,6 +60,13 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
nv_wr32(priv, 0x0032f4, cur->addr >> 12);
nv_wr32(priv, 0x0032ec, p);
nv_wr32(priv, 0x002500, 0x00000101);
+}
+
+void
+nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+{
+ mutex_lock(&nv_subdev(priv)->mutex);
+ nv50_fifo_playlist_update_locked(priv);
mutex_unlock(&nv_subdev(priv)->mutex);
}
@@ -489,7 +495,7 @@ nv50_fifo_init(struct nouveau_object *object)
for (i = 0; i < 128; i++)
nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
- nv50_fifo_playlist_update(priv);
+ nv50_fifo_playlist_update_locked(priv);
nv_wr32(priv, 0x003200, 0x00000001);
nv_wr32(priv, 0x003250, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 0a393f7f055..5a5961b6a6a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -218,7 +218,7 @@ struct nv04_display_class {
#define NV50_DISP_DAC_PWR_STATE 0x00000040
#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
-#define NV50_DISP_DAC_LOAD 0x0002000c
+#define NV50_DISP_DAC_LOAD 0x00020100
#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
#define NV50_DISP_PIOR_MTHD 0x00030000
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ebf0a683305..dd5e01f89f2 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1554,7 +1554,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct nv50_disp *disp = nv50_disp(encoder->dev);
int ret, or = nouveau_encoder(encoder)->or;
- u32 load = 0;
+ u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
+ if (load == 0)
+ load = 340;
ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
if (ret || load != 7)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 44a7da66e08..8406c8251fb 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -667,6 +667,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
@@ -693,7 +695,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -704,7 +707,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -718,7 +722,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8546e3b333b..0f89ce3d02b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4754,6 +4754,12 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -4923,10 +4929,6 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
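
Across the radeon hunks (evergreen.c here, and the analogous files that follow), radeon_irq_kms_init() moves out of the one-time *_init() path and into *_startup(), guarded by rdev->irq.installed, so startup paths that run again on resume still initialize the IRQ state exactly once. A reduced sketch of the lazy-init guard, with hypothetical names:

struct dev_state { bool irq_installed; };               /* hypothetical */

static int example_irq_init(struct dev_state *st);      /* hypothetical */
static void example_irq_set(struct dev_state *st);      /* hypothetical */

/* Sketch: initialize a subsystem lazily from a startup path that may run
 * more than once (e.g. on resume), doing the init exactly once. */
static int example_startup(struct dev_state *st)
{
        int r;

        if (!st->irq_installed) {       /* not yet initialized */
                r = example_irq_init(st);
                if (r)
                        return r;
                st->irq_installed = true;
        }

        example_irq_set(st);            /* (re)program the hardware */
        return 0;
}
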
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7969c0c8ec2..84583302b08 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2025,6 +2025,12 @@ static int cayman_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2190,10 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 4973bff37fe..d0314ecbd7c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3869,6 +3869,12 @@ static int r100_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -4024,9 +4030,6 @@ int r100_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c60350e6872..b9b776f1e58 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1382,6 +1382,12 @@ static int r300_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -1516,9 +1522,6 @@ int r300_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 6fce2eb4dd1..4e796ecf9ea 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -265,6 +265,12 @@ static int r420_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -411,10 +417,6 @@ int r420_init(struct radeon_device *rdev)
if (r) {
return r;
}
- r = radeon_irq_kms_init(rdev);
- if (r) {
- return r;
- }
/* Memory manager */
r = radeon_bo_init(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f795a4e092c..e1aece73b37 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -194,6 +194,12 @@ static int r520_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -297,9 +303,6 @@ int r520_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index b45e6484867..0e534169592 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1046,6 +1046,24 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
+uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
+ r = RREG32(R_0028FC_MC_DATA);
+ WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+ return r;
+}
+
+void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+ S_0028F8_MC_IND_WR_EN(1));
+ WREG32(R_0028FC_MC_DATA, v);
+ WREG32(R_0028F8_MC_INDEX, 0x7F);
+}
+
static void r600_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
@@ -1181,6 +1199,8 @@ static int r600_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
+ uint32_t h_addr, l_addr;
+ unsigned long long k8_addr;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
@@ -1221,7 +1241,30 @@ static int r600_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP) {
rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+ h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
+ l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
+ k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+ if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+ {
+ /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
+ * memory is present.
+ */
+ if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+ (unsigned long long)rdev->mc.aper_base, k8_addr);
+ rdev->mc.aper_base = (resource_size_t)k8_addr;
+ rdev->fastfb_working = true;
+ }
+ }
+ }
}
+
radeon_update_bandwidth_info(rdev);
return 0;
}
@@ -3202,6 +3245,12 @@ static int r600_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -3356,10 +3405,6 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index acb146c0697..79df558f8c4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1342,6 +1342,14 @@
#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
+#define R_000011_K8_FB_LOCATION 0x11
+#define R_000012_MC_MISC_UMA_CNTL 0x12
+#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
+#define R_0028F8_MC_INDEX 0x28F8
+#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
+#define C_0028F8_MC_IND_ADDR 0xFFFFFE00
+#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
+#define R_0028FC_MC_DATA 0x28FC
#define R_008020_GRBM_SOFT_RESET 0x8020
#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 06b8c19ab19..a2802b47ee9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -122,6 +122,10 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ rdev->mc_rreg = &rs780_mc_rreg;
+ rdev->mc_wreg = &rs780_mc_wreg;
+ }
if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2c87365d345..a72759ede75 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -347,6 +347,8 @@ extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 73051ce3121..233a9b9fa1f 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -417,6 +417,12 @@ static int rs400_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -535,9 +541,6 @@ int rs400_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 46fa1b07c56..670b555d2ca 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -923,6 +923,12 @@ static int rs600_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -1047,9 +1053,6 @@ int rs600_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index ab4c86cfd55..55880d5962c 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -651,6 +651,12 @@ static int rs690_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -776,9 +782,6 @@ int rs690_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ffcba730c57..21c7d7b26e5 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -532,6 +532,12 @@ static int rv515_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -662,9 +668,6 @@ int rv515_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 08aef24afe4..4a62ad2e539 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1887,6 +1887,12 @@ static int rv770_startup(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2045,10 +2051,6 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d1ba9d88f31..a1b0da6b580 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5350,6 +5350,12 @@ static int si_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = si_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -5533,10 +5539,6 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index e461e997245..7a4d1010690 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -6,6 +6,7 @@ config DRM_TILCDC
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
select BACKLIGHT_CLASS_DEVICE
+ select BACKLIGHT_LCD_SUPPORT
help
Choose this option if you have an TI SoC with LCDC display
controller, for example AM33xx in beagle-bone, DA8xx, or
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index dc3ae5c56f5..d39a5cede0b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -264,9 +264,12 @@ static struct mt_class mt_classes[] = {
static void mt_free_input_name(struct hid_input *hi)
{
struct hid_device *hdev = hi->report->device;
+ const char *name = hi->input->name;
- if (hi->input->name != hdev->name)
- kfree(hi->input->name);
+ if (name != hdev->name) {
+ hi->input->name = hdev->name;
+ kfree(name);
+ }
}
static ssize_t mt_show_quirks(struct device *dev,
@@ -1040,11 +1043,11 @@ static void mt_remove(struct hid_device *hdev)
struct hid_input *hi;
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
- hid_hw_stop(hdev);
-
list_for_each_entry(hi, &hdev->inputs, list)
mt_free_input_name(hi);
+ hid_hw_stop(hdev);
+
kfree(td);
hid_set_drvdata(hdev, NULL);
}
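
The hid-multitouch hunks free the duplicated input name while the input devices still exist (before hid_hw_stop() unregisters them), and they point hi->input->name back at the device's own name before kfree(), so no freed pointer stays visible. A sketch of the repoint-before-free idea, with a hypothetical structure:

#include <linux/slab.h>

struct widget { const char *name; };            /* hypothetical */

/* Sketch: repoint a shared field at stable storage before freeing the old
 * allocation, so readers never observe a dangling pointer. */
static void free_custom_name(struct widget *w, const char *default_name)
{
        const char *name = w->name;

        if (name != default_name) {
                w->name = default_name; /* visible field stays valid */
                kfree(name);            /* old allocation released after */
        }
}
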
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 7e76922a4ba..f920619cd6d 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -331,26 +331,68 @@ static int adm1021_detect(struct i2c_client *client,
man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
+ if (man_id < 0 || dev_id < 0)
+ return -ENODEV;
+
if (man_id == 0x4d && dev_id == 0x01)
type_name = "max1617a";
else if (man_id == 0x41) {
if ((dev_id & 0xF0) == 0x30)
type_name = "adm1023";
- else
+ else if ((dev_id & 0xF0) == 0x00)
type_name = "adm1021";
+ else
+ return -ENODEV;
} else if (man_id == 0x49)
type_name = "thmc10";
else if (man_id == 0x23)
type_name = "gl523sm";
else if (man_id == 0x54)
type_name = "mc1066";
- /* LM84 Mfr ID in a different place, and it has more unused bits */
- else if (conv_rate == 0x00
- && (config & 0x7F) == 0x00
- && (status & 0xAB) == 0x00)
- type_name = "lm84";
- else
- type_name = "max1617";
+ else {
+ int lte, rte, lhi, rhi, llo, rlo;
+
+ /* extra checks for LM84 and MAX1617 to avoid misdetections */
+
+ llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
+ rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
+
+ /* fail if any of the additional register reads failed */
+ if (llo < 0 || rlo < 0)
+ return -ENODEV;
+
+ lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
+ rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
+ lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
+ rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
+
+ /*
+ * Fail for negative temperatures and negative high limits.
+ * This check also catches read errors on the tested registers.
+ */
+ if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
+ return -ENODEV;
+
+ /* fail if all registers hold the same value */
+ if (lte == rte && lte == lhi && lte == rhi && lte == llo
+ && lte == rlo)
+ return -ENODEV;
+
+ /*
+ * LM84 Mfr ID is in a different place,
+ * and it has more unused bits.
+ */
+ if (conv_rate == 0x00
+ && (config & 0x7F) == 0x00
+ && (status & 0xAB) == 0x00) {
+ type_name = "lm84";
+ } else {
+ /* fail if low limits are larger than high limits */
+ if ((s8)llo > lhi || (s8)rlo > rhi)
+ return -ENODEV;
+ type_name = "max1617";
+ }
+ }
pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
type_name, i2c_adapter_id(adapter), client->addr);
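
The adm1021 detect path above now returns -ENODEV when any SMBus read fails or when the register pattern looks implausible, instead of defaulting to "max1617" and misbinding. A stripped-down sketch of a detect routine that refuses to guess, with illustrative register offsets and a single manufacturer check:

#include <linux/errno.h>
#include <linux/i2c.h>

/* Sketch: any failed read or implausible value means "not our chip". */
static int example_detect(struct i2c_client *client)
{
        int man_id, dev_id;

        man_id = i2c_smbus_read_byte_data(client, 0xFE); /* offsets illustrative */
        dev_id = i2c_smbus_read_byte_data(client, 0xFF);
        if (man_id < 0 || dev_id < 0)
                return -ENODEV;         /* read error: do not misdetect */

        if (man_id != 0x41)
                return -ENODEV;         /* unknown manufacturer */

        return 0;                       /* plausible match */
}
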
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 81c7b73695d..3b9afccaaad 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -61,7 +61,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
if (dma_region) {
struct qib_mregion *tmr;
- tmr = rcu_dereference(dev->dma_mr);
+ tmr = rcu_access_pointer(dev->dma_mr);
if (!tmr) {
qib_get_mr(mr);
rcu_assign_pointer(dev->dma_mr, mr);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index f19b0998a53..2e84ef859c5 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -5,6 +5,7 @@
* Copyright (C) 2004 Alex Aizman
* Copyright (C) 2005 Mike Christie
* Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
* maintained by openib-general@openib.org
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 06f578cde75..4f069c0d4c0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -8,6 +8,7 @@
*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index a00ccd1ca33..b6d81a86c97 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 68ebb7fe072..7827baf455a 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 5278916c310..2c4941d0656 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -292,10 +293,10 @@ out_err:
}
/**
- * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
+ * releases the FMR pool and QP objects, returns 0 on success,
* -1 on failure
*/
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
int cq_index;
BUG_ON(ib_conn == NULL);
@@ -314,13 +315,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
rdma_destroy_qp(ib_conn->cma_id);
}
- /* if cma handler context, the caller acts s.t the cma destroy the id */
- if (ib_conn->cma_id != NULL && can_destroy_id)
- rdma_destroy_id(ib_conn->cma_id);
ib_conn->fmr_pool = NULL;
ib_conn->qp = NULL;
- ib_conn->cma_id = NULL;
kfree(ib_conn->page_vec);
if (ib_conn->login_buf) {
@@ -415,11 +412,16 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
iser_free_rx_descriptors(ib_conn);
- iser_free_ib_conn_res(ib_conn, can_destroy_id);
+ iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
+ /* if cma handler context, the caller actually destroys the id */
+ if (ib_conn->cma_id != NULL && can_destroy_id) {
+ rdma_destroy_id(ib_conn->cma_id);
+ ib_conn->cma_id = NULL;
+ }
iscsi_destroy_endpoint(ib_conn->ep);
}
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 29889bbdcc6..63b3d4eb0ef 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -76,16 +76,10 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
{
u32 irqnr;
- do {
- irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
- if (irqnr != 0x7f) {
- __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
- irqnr = irq_find_mapping(icoll_domain, irqnr);
- handle_IRQ(irqnr, regs);
- continue;
- }
- break;
- } while (1);
+ irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
+ __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
+ irqnr = irq_find_mapping(icoll_domain, irqnr);
+ handle_IRQ(irqnr, regs);
}
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 065b7a31a47..47a52ab580d 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -119,7 +119,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
/* Skip invalid IRQs, only register handlers for the real ones */
if (!(f->valid & BIT(hwirq)))
- return -ENOTSUPP;
+ return -EPERM;
irq_set_chip_data(irq, f);
irq_set_chip_and_handler(irq, &f->chip,
handle_level_irq);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 884d11c7355..2bbb00404cf 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -197,7 +197,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
/* Skip invalid IRQs, only register handlers for the real ones */
if (!(v->valid_sources & (1 << hwirq)))
- return -ENOTSUPP;
+ return -EPERM;
irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
irq_set_chip_data(irq, v->base);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 05c220d05e2..f950c9d29f3 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -1,7 +1,6 @@
config BCACHE
tristate "Block device as cache"
- select CLOSURES
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 340146d7c17..d3e15b42a4a 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *);
struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
-void bch_writeback_init_cached_dev(struct cached_dev *);
+void bch_cached_dev_writeback_init(struct cached_dev *);
void bch_moving_init_cache_set(struct cache_set *);
void bch_cache_allocator_exit(struct cache *ca);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 64e679449c2..b8730e714d6 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = {
};
static KTYPE(bch_stats);
-static void scale_accounting(unsigned long data);
-
-void bch_cache_accounting_init(struct cache_accounting *acc,
- struct closure *parent)
-{
- kobject_init(&acc->total.kobj, &bch_stats_ktype);
- kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
- kobject_init(&acc->hour.kobj, &bch_stats_ktype);
- kobject_init(&acc->day.kobj, &bch_stats_ktype);
-
- closure_init(&acc->cl, parent);
- init_timer(&acc->timer);
- acc->timer.expires = jiffies + accounting_delay;
- acc->timer.data = (unsigned long) acc;
- acc->timer.function = scale_accounting;
- add_timer(&acc->timer);
-}
-
int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
struct kobject *parent)
{
@@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors)
atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
}
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+ struct closure *parent)
+{
+ kobject_init(&acc->total.kobj, &bch_stats_ktype);
+ kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
+ kobject_init(&acc->hour.kobj, &bch_stats_ktype);
+ kobject_init(&acc->day.kobj, &bch_stats_ktype);
+
+ closure_init(&acc->cl, parent);
+ init_timer(&acc->timer);
+ acc->timer.expires = jiffies + accounting_delay;
+ acc->timer.data = (unsigned long) acc;
+ acc->timer.function = scale_accounting;
+ add_timer(&acc->timer);
+}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c8046bc4aa5..f88e2b653a3 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -634,11 +634,10 @@ static int open_dev(struct block_device *b, fmode_t mode)
return 0;
}
-static int release_dev(struct gendisk *b, fmode_t mode)
+static void release_dev(struct gendisk *b, fmode_t mode)
{
struct bcache_device *d = b->private_data;
closure_put(&d->cl);
- return 0;
}
static int ioctl_dev(struct block_device *b, fmode_t mode,
@@ -732,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d)
if (d->c)
bcache_device_detach(d);
-
- if (d->disk)
+ if (d->disk && d->disk->flags & GENHD_FL_UP)
del_gendisk(d->disk);
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
@@ -756,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
- bio_split_pool_init(&d->bio_split_hook))
-
- return -ENOMEM;
-
- d->disk = alloc_disk(1);
- if (!d->disk)
+ bio_split_pool_init(&d->bio_split_hook) ||
+ !(d->disk = alloc_disk(1)) ||
+ !(q = blk_alloc_queue(GFP_KERNEL)))
return -ENOMEM;
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
@@ -771,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
- q = blk_alloc_queue(GFP_KERNEL);
- if (!q)
- return -ENOMEM;
-
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
@@ -999,14 +990,17 @@ static void cached_dev_free(struct closure *cl)
mutex_lock(&bch_register_lock);
- bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+ if (atomic_read(&dc->running))
+ bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
bcache_device_free(&dc->disk);
list_del(&dc->list);
mutex_unlock(&bch_register_lock);
if (!IS_ERR_OR_NULL(dc->bdev)) {
- blk_sync_queue(bdev_get_queue(dc->bdev));
+ if (dc->bdev->bd_disk)
+ blk_sync_queue(bdev_get_queue(dc->bdev));
+
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
@@ -1028,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl)
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
- int err;
+ int ret;
struct io *io;
-
- closure_init(&dc->disk.cl, NULL);
- set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
+ struct request_queue *q = bdev_get_queue(dc->bdev);
__module_get(THIS_MODULE);
INIT_LIST_HEAD(&dc->list);
+ closure_init(&dc->disk.cl, NULL);
+ set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
-
- bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
-
- err = bcache_device_init(&dc->disk, block_size);
- if (err)
- goto err;
-
- spin_lock_init(&dc->io_lock);
- closure_init_unlocked(&dc->sb_write);
INIT_WORK(&dc->detach, cached_dev_detach_finish);
+ closure_init_unlocked(&dc->sb_write);
+ INIT_LIST_HEAD(&dc->io_lru);
+ spin_lock_init(&dc->io_lock);
+ bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
dc->sequential_merge = true;
dc->sequential_cutoff = 4 << 20;
- INIT_LIST_HEAD(&dc->io_lru);
- dc->sb_bio.bi_max_vecs = 1;
- dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
-
for (io = dc->io; io < dc->io + RECENT_IO; io++) {
list_add(&io->lru, &dc->io_lru);
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}
- bch_writeback_init_cached_dev(dc);
+ ret = bcache_device_init(&dc->disk, block_size);
+ if (ret)
+ return ret;
+
+ set_capacity(dc->disk.disk,
+ dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
+
+ dc->disk.disk->queue->backing_dev_info.ra_pages =
+ max(dc->disk.disk->queue->backing_dev_info.ra_pages,
+ q->backing_dev_info.ra_pages);
+
+ bch_cached_dev_request_init(dc);
+ bch_cached_dev_writeback_init(dc);
return 0;
-err:
- bcache_device_stop(&dc->disk);
- return err;
}
/* Cached device - bcache superblock */
-static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
+static void register_bdev(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev,
struct cached_dev *dc)
{
char name[BDEVNAME_SIZE];
const char *err = "cannot allocate memory";
- struct gendisk *g;
struct cache_set *c;
- if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
- return err;
-
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
- dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
- g = dc->disk.disk;
-
- set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-
- g->queue->backing_dev_info.ra_pages =
- max(g->queue->backing_dev_info.ra_pages,
- bdev->bd_queue->backing_dev_info.ra_pages);
+ bio_init(&dc->sb_bio);
+ dc->sb_bio.bi_max_vecs = 1;
+ dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+ dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ get_page(sb_page);
- bch_cached_dev_request_init(dc);
+ if (cached_dev_init(dc, sb->block_size << 9))
+ goto err;
err = "error creating kobject";
if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
@@ -1103,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
+ pr_info("registered backing device %s", bdevname(bdev, name));
+
list_add(&dc->list, &uncached_devices);
list_for_each_entry(c, &bch_cache_sets, list)
bch_cached_dev_attach(dc, c);
@@ -1111,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
bch_cached_dev_run(dc);
- return NULL;
+ return;
err:
- kobject_put(&dc->disk.kobj);
pr_notice("error opening %s: %s", bdevname(bdev, name), err);
- /*
- * Return NULL instead of an error because kobject_put() cleans
- * everything up
- */
- return NULL;
+ bcache_device_stop(&dc->disk);
}
/* Flash only volumes */
@@ -1717,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
size_t free;
struct bucket *b;
- if (!ca)
- return -ENOMEM;
-
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- memcpy(&ca->sb, sb, sizeof(struct cache_sb));
-
INIT_LIST_HEAD(&ca->discards);
- bio_init(&ca->sb_bio);
- ca->sb_bio.bi_max_vecs = 1;
- ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
-
bio_init(&ca->journal.bio);
ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -1742,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
!init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
!init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
- !(ca->buckets = vmalloc(sizeof(struct bucket) *
+ !(ca->buckets = vzalloc(sizeof(struct bucket) *
ca->sb.nbuckets)) ||
!(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
2, GFP_KERNEL)) ||
!(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
!(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
bio_split_pool_init(&ca->bio_split_hook))
- goto err;
+ return -ENOMEM;
ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
- memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
for_each_bucket(b, ca)
atomic_set(&b->pin, 0);
@@ -1766,22 +1741,28 @@ err:
return -ENOMEM;
}
-static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
+static void register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
const char *err = "cannot allocate memory";
- if (cache_alloc(sb, ca) != 0)
- return err;
-
- ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
+ bio_init(&ca->sb_bio);
+ ca->sb_bio.bi_max_vecs = 1;
+ ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+ ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ get_page(sb_page);
+
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
+ if (cache_alloc(sb, ca) != 0)
+ goto err;
+
err = "error creating kobject";
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
goto err;
@@ -1791,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
goto err;
pr_info("registered cache device %s", bdevname(bdev, name));
-
- return NULL;
+ return;
err:
+ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
kobject_put(&ca->kobj);
- pr_info("error opening %s: %s", bdevname(bdev, name), err);
- /* Return NULL instead of an error because kobject_put() cleans
- * everything up
- */
- return NULL;
}
/* Global interfaces/init */
@@ -1833,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
sb);
- if (bdev == ERR_PTR(-EBUSY))
- err = "device busy";
-
- if (IS_ERR(bdev) ||
- set_blocksize(bdev, 4096))
+ if (IS_ERR(bdev)) {
+ if (bdev == ERR_PTR(-EBUSY))
+ err = "device busy";
goto err;
+ }
+
+ err = "failed to set blocksize";
+ if (set_blocksize(bdev, 4096))
+ goto err_close;
err = read_super(sb, bdev, &sb_page);
if (err)
@@ -1846,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+ goto err_close;
- err = register_bdev(sb, sb_page, bdev, dc);
+ register_bdev(sb, sb_page, bdev, dc);
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca)
+ goto err_close;
- err = register_cache(sb, sb_page, bdev, ca);
+ register_cache(sb, sb_page, bdev, ca);
}
-
- if (err) {
- /* register_(bdev|cache) will only return an error if they
- * didn't get far enough to create the kobject - if they did,
- * the kobject destructor will do this cleanup.
- */
+out:
+ if (sb_page)
put_page(sb_page);
-err_close:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
- if (attr != &ksysfs_register_quiet)
- pr_info("error opening %s: %s", path, err);
- ret = -EINVAL;
- }
-
kfree(sb);
kfree(path);
mutex_unlock(&bch_register_lock);
module_put(THIS_MODULE);
return ret;
+
+err_close:
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+err:
+ if (attr != &ksysfs_register_quiet)
+ pr_info("error opening %s: %s", path, err);
+ ret = -EINVAL;
+ goto out;
}
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 93e7e31a4bd..2714ed3991d 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -375,7 +375,7 @@ err:
refill_dirty(cl);
}
-void bch_writeback_init_cached_dev(struct cached_dev *dc)
+void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
closure_init_unlocked(&dc->writeback);
init_rwsem(&dc->writeback_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 681d1099a2d..9b82377a833 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5268,8 +5268,8 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 55951182af7..6e17f8181c4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -417,7 +417,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
r1_bio->bios[mirror] = NULL;
to_put = bio;
- set_bit(R1BIO_Uptodate, &r1_bio->state);
+ /*
+ * Do not set R1BIO_Uptodate if the current device is
+ * rebuilding or Faulty. This is because we cannot use
+ * such a device for properly reading the data back (we could
+ * potentially use it, if the current write had fallen
+ * before rdev->recovery_offset, but for simplicity we don't
+ * check this here).
+ */
+ if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
+ !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
/* Maybe we can clear some bad blocks. */
if (is_badblock(conf->mirrors[mirror].rdev,
@@ -870,17 +880,17 @@ static void allow_barrier(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static void freeze_array(struct r1conf *conf)
+static void freeze_array(struct r1conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
* We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+1
+ * wait until nr_pending matches nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
* pending IO requests to complete or be queued for re-try.
- * Thus the number queued (nr_queued) plus this request (1)
+ * Thus the number queued (nr_queued) plus this request (extra)
* must match the number of pending IOs (nr_pending) before
* we continue.
*/
@@ -888,7 +898,7 @@ static void freeze_array(struct r1conf *conf)
conf->barrier++;
conf->nr_waiting++;
wait_event_lock_irq_cmd(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
+ conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
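(Note, not part of the patch: the reworked freeze_array() boils down to one invariant. The array is quiescent once every pending request is accounted for either as retired or as parked for retry, i.e. nr_pending == nr_queued + extra, with extra = 1 when the caller is itself one of the failed, still-pending requests (handle_read_error) and 0 otherwise (reshape, add/remove disk). A minimal sketch of that condition outside the kernel, with a stand-in struct for the relevant r1conf fields:

#include <stdbool.h>

/* Stand-in for the few r1conf fields the barrier logic cares about. */
struct conf_counts {
	int nr_pending;	/* I/O submitted and not yet retired */
	int nr_queued;	/* failed I/O parked for retry */
};

/* The condition freeze_array(conf, extra) waits for. */
static bool array_quiescent(const struct conf_counts *c, int extra)
{
	/*
	 * extra == 1: the caller is itself one of the pending requests,
	 * so that one request can never retire while we wait.
	 * extra == 0: the caller is not on the pending count at all.
	 */
	return c->nr_pending == c->nr_queued + extra;
}
)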
@@ -1544,8 +1554,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* we wait for all outstanding requests to complete.
*/
synchronize_sched();
- raise_barrier(conf);
- lower_barrier(conf);
+ freeze_array(conf, 0);
+ unfreeze_array(conf);
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
@@ -1595,11 +1605,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
struct md_rdev *repl =
conf->mirrors[conf->raid_disks + number].rdev;
- raise_barrier(conf);
+ freeze_array(conf, 0);
clear_bit(Replacement, &repl->flags);
p->rdev = repl;
conf->mirrors[conf->raid_disks + number].rdev = NULL;
- lower_barrier(conf);
+ unfreeze_array(conf);
clear_bit(WantReplacement, &rdev->flags);
} else
clear_bit(WantReplacement, &rdev->flags);
@@ -2195,7 +2205,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
* frozen
*/
if (mddev->ro == 0) {
- freeze_array(conf);
+ freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
unfreeze_array(conf);
@@ -2780,8 +2790,8 @@ static int run(struct mddev *mddev)
return PTR_ERR(conf);
if (mddev->queue)
- blk_queue_max_write_same_sectors(mddev->queue,
- mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, 0);
+
rdev_for_each(rdev, mddev) {
if (!mddev->gendisk)
continue;
@@ -2963,7 +2973,7 @@ static int raid1_reshape(struct mddev *mddev)
return -ENOMEM;
}
- raise_barrier(conf);
+ freeze_array(conf, 0);
/* ok, everything is stopped */
oldpool = conf->r1bio_pool;
@@ -2994,7 +3004,7 @@ static int raid1_reshape(struct mddev *mddev)
conf->raid_disks = mddev->raid_disks = raid_disks;
mddev->delta_disks = 0;
- lower_barrier(conf);
+ unfreeze_array(conf);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 59d4daa5f4c..6ddae2501b9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
sector_t first_bad;
int bad_sectors;
- set_bit(R10BIO_Uptodate, &r10_bio->state);
+ /*
+ * Do not set R10BIO_Uptodate if the current device is
+ * rebuilding or Faulty. This is because we cannot use
+ * such a device for properly reading the data back (we could
+ * potentially use it, if the current write had fallen
+ * before rdev->recovery_offset, but for simplicity we don't
+ * check this here).
+ */
+ if (test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
/* Maybe we can clear some bad blocks. */
if (is_badblock(rdev,
@@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
wake_up(&conf->wait_barrier);
}
-static void freeze_array(struct r10conf *conf)
+static void freeze_array(struct r10conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
* We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+1
+ * wait until nr_pending matches nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
* pending IO requests to complete or be queued for re-try.
- * Thus the number queued (nr_queued) plus this request (1)
+ * Thus the number queued (nr_queued) plus this request (extra)
* must match the number of pending IOs (nr_pending) before
* we continue.
*/
@@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
conf->barrier++;
conf->nr_waiting++;
wait_event_lock_irq_cmd(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
+ conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
flush_pending_writes(conf));
@@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* we wait for all outstanding requests to complete.
*/
synchronize_sched();
- raise_barrier(conf, 0);
- lower_barrier(conf);
+ freeze_array(conf, 0);
+ unfreeze_array(conf);
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
@@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
r10_bio->devs[slot].bio = NULL;
if (mddev->ro == 0) {
- freeze_array(conf);
+ freeze_array(conf, 1);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
} else
@@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev)
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
- blk_queue_max_write_same_sectors(mddev->queue,
- mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->geo.raid_disks % conf->geo.near_copies)
blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9359828ffe2..05e4a105b9c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -664,6 +664,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_FLUSH;
+ bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
@@ -701,6 +702,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
else
rbi->bi_sector = (sh->sector
+ rrdev->data_offset);
+ rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
@@ -5464,7 +5466,7 @@ static int run(struct mddev *mddev)
if (mddev->major_version == 0 &&
mddev->minor_version > 90)
rdev->recovery_offset = reshape_offset;
-
+
if (rdev->recovery_offset < reshape_offset) {
/* We need to check old and new layout */
if (!only_parity(rdev->raid_disk,
@@ -5587,6 +5589,8 @@ static int run(struct mddev *mddev)
*/
mddev->queue->limits.discard_zeroes_data = 0;
+ blk_queue_max_write_same_sectors(mddev->queue, 0);
+
rdev_for_each(rdev, mddev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 713d89fedc4..f580d30bb78 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -197,6 +197,8 @@ void mei_stop(struct mei_device *dev)
{
dev_dbg(&dev->pdev->dev, "stopping the device.\n");
+ flush_scheduled_work();
+
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
@@ -210,8 +212,6 @@ void mei_stop(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
- flush_scheduled_work();
-
mei_watchdog_unregister(dev);
}
EXPORT_SYMBOL_GPL(mei_stop);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 3adf8a70f26..d0c6907dfd9 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -142,6 +142,8 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
mei_cl_unlink(ndev->cl_info);
kfree(ndev->cl_info);
}
+
+ memset(ndev, 0, sizeof(struct mei_nfc_dev));
}
static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a727464e9c3..0f268329bd3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -325,6 +325,7 @@ static int mei_me_pci_resume(struct device *device)
mutex_lock(&dev->device_lock);
dev->dev_state = MEI_DEV_POWER_UP;
+ mei_clear_interrupts(dev);
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 44d273c5e19..0535d1e0bc7 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -172,6 +172,7 @@ static long gru_get_config_info(unsigned long arg)
nodesperblade = 2;
else
nodesperblade = 1;
+ memset(&info, 0, sizeof(info));
info.cpus = num_online_cpus();
info.nodes = num_online_nodes();
info.blades = info.nodes / nodesperblade;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bc1246f6f86..3b31c19972d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -725,8 +725,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
struct net_device *bond_dev, *vlan_dev, *upper_dev;
struct vlan_entry *vlan;
- rcu_read_lock();
read_lock(&bond->lock);
+ rcu_read_lock();
bond_dev = bond->dev;
@@ -748,12 +748,19 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev);
}
+ rcu_read_unlock();
- if (--bond->igmp_retrans > 0)
+ /* We use curr_slave_lock to protect against concurrent access to
+ * igmp_retrans from multiple running instances of this function and
+ * bond_change_active_slave
+ */
+ write_lock_bh(&bond->curr_slave_lock);
+ if (bond->igmp_retrans > 1) {
+ bond->igmp_retrans--;
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+ }
+ write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
- rcu_read_unlock();
}
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
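(Note, not part of the patch: with the hunk above, igmp_retrans is only read and decremented while curr_slave_lock is held for writing, so overlapping runs of the work item, or a concurrent bond_change_active_slave(), can no longer race the counter below zero; that is also what allows the field to become an unsigned u8 in bonding.h further down. A user-space sketch of the same test-and-decrement-under-lock pattern, with a pthread mutex standing in for the kernel rwlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

static pthread_mutex_t retrans_lock = PTHREAD_MUTEX_INITIALIZER;
static uint8_t igmp_retrans = 3;	/* how many resends are still owed */

/* Returns true when another resend should be scheduled. */
static bool consume_retrans_slot(void)
{
	bool again = false;

	pthread_mutex_lock(&retrans_lock);
	if (igmp_retrans > 1) {		/* test and decrement under the lock */
		igmp_retrans--;
		again = true;
	}
	pthread_mutex_unlock(&retrans_lock);

	return again;
}
)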
@@ -1901,6 +1908,10 @@ err_free:
err_undo_flags:
bond_compute_features(bond);
+ /* Enslaving the first slave failed and we need to fix the master's MAC */
+ if (bond->slave_cnt == 0 &&
+ ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
+ eth_hw_addr_random(bond_dev);
return res;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b38609b967b..c990b42cf7a 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -225,7 +225,7 @@ struct bonding {
rwlock_t curr_slave_lock;
u8 send_peer_notif;
s8 setup_by_slave;
- s8 igmp_retrans;
+ u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 6e15ef08f30..cbd388eea68 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
err = usb_8dev_cmd_version(priv, &version);
if (err) {
netdev_err(netdev, "can't get firmware version\n");
- goto cleanup_cmd_msg_buffer;
+ goto cleanup_unregister_candev;
} else {
netdev_info(netdev,
"firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
return 0;
+cleanup_unregister_candev:
+ unregister_netdev(priv->netdev);
+
cleanup_cmd_msg_buffer:
kfree(priv->cmd_msg_buffer);
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 36d6abd1cff..ad6aa1e9834 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -67,4 +67,22 @@ config ATL1C
To compile this driver as a module, choose M here. The module
will be called atl1c.
+config ALX
+ tristate "Qualcomm Atheros AR816x/AR817x support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MDIO
+ help
+ This driver supports the Qualcomm Atheros L1F ethernet adapter,
+ i.e. the following chipsets:
+
+ 1969:1091 - AR8161 Gigabit Ethernet
+ 1969:1090 - AR8162 Fast Ethernet
+ 1969:10A1 - AR8171 Gigabit Ethernet
+ 1969:10A0 - AR8172 Fast Ethernet
+
+ To compile this driver as a module, choose M here. The module
+ will be called alx.
+
endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index e7e76fb576f..5cf1c65bbce 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_ATL2) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644
index 00000000000..5901fa407d5
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ALX) += alx.o
+alx-objs := main.o ethtool.o hw.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644
index 00000000000..50b3ae2b143
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_H_
+#define _ALX_H_
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include "hw.h"
+
+#define ALX_WATCHDOG_TIME (5 * HZ)
+
+struct alx_buffer {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(size);
+};
+
+struct alx_rx_queue {
+ struct alx_rrd *rrd;
+ dma_addr_t rrd_dma;
+
+ struct alx_rfd *rfd;
+ dma_addr_t rfd_dma;
+
+ struct alx_buffer *bufs;
+
+ u16 write_idx, read_idx;
+ u16 rrd_read_idx;
+};
+#define ALX_RX_ALLOC_THRESH 32
+
+struct alx_tx_queue {
+ struct alx_txd *tpd;
+ dma_addr_t tpd_dma;
+ struct alx_buffer *bufs;
+ u16 write_idx, read_idx;
+};
+
+#define ALX_DEFAULT_TX_WORK 128
+
+enum alx_device_quirks {
+ ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
+};
+
+struct alx_priv {
+ struct net_device *dev;
+
+ struct alx_hw hw;
+
+ /* all descriptor memory */
+ struct {
+ dma_addr_t dma;
+ void *virt;
+ int size;
+ } descmem;
+
+ /* protect int_mask updates */
+ spinlock_t irq_lock;
+ u32 int_mask;
+
+ int tx_ringsz;
+ int rx_ringsz;
+ int rxbuf_size;
+
+ struct napi_struct napi;
+ struct alx_tx_queue txq;
+ struct alx_rx_queue rxq;
+
+ struct work_struct link_check_wk;
+ struct work_struct reset_wk;
+
+ u16 msg_enable;
+
+ bool msi;
+};
+
+extern const struct ethtool_ops alx_ethtool_ops;
+extern const char alx_drv_name[];
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644
index 00000000000..6fa2aec2bc8
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#include "alx.h"
+#include "reg.h"
+#include "hw.h"
+
+
+static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ ecmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_Pause;
+ if (alx_hw_giga(hw))
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+ ecmd->advertising = ADVERTISED_TP;
+ if (hw->adv_cfg & ADVERTISED_Autoneg)
+ ecmd->advertising |= hw->adv_cfg;
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ if (hw->adv_cfg & ADVERTISED_Autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
+ if (hw->flowctrl & ALX_FC_RX) {
+ ecmd->advertising |= ADVERTISED_Pause;
+
+ if (!(hw->flowctrl & ALX_FC_TX))
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ } else if (hw->flowctrl & ALX_FC_TX) {
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ }
+ }
+
+ if (hw->link_speed != SPEED_UNKNOWN) {
+ ethtool_cmd_speed_set(ecmd,
+ hw->link_speed - hw->link_speed % 10);
+ ecmd->duplex = hw->link_speed % 10;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ u32 adv_cfg;
+
+ ASSERT_RTNL();
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ecmd->advertising & ADVERTISED_1000baseT_Half)
+ return -EINVAL;
+ adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
+ } else {
+ int speed = ethtool_cmd_speed(ecmd);
+
+ switch (speed + ecmd->duplex) {
+ case SPEED_10 + DUPLEX_HALF:
+ adv_cfg = ADVERTISED_10baseT_Half;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ adv_cfg = ADVERTISED_10baseT_Full;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ adv_cfg = ADVERTISED_100baseT_Half;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ adv_cfg = ADVERTISED_100baseT_Full;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ hw->adv_cfg = adv_cfg;
+ return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
+}
+
+static void alx_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (hw->flowctrl & ALX_FC_ANEG &&
+ hw->adv_cfg & ADVERTISED_Autoneg)
+ pause->autoneg = AUTONEG_ENABLE;
+ else
+ pause->autoneg = AUTONEG_DISABLE;
+
+ if (hw->flowctrl & ALX_FC_TX)
+ pause->tx_pause = 1;
+ else
+ pause->tx_pause = 0;
+
+ if (hw->flowctrl & ALX_FC_RX)
+ pause->rx_pause = 1;
+ else
+ pause->rx_pause = 0;
+}
+
+
+static int alx_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ int err = 0;
+ bool reconfig_phy = false;
+ u8 fc = 0;
+
+ if (pause->tx_pause)
+ fc |= ALX_FC_TX;
+ if (pause->rx_pause)
+ fc |= ALX_FC_RX;
+ if (pause->autoneg)
+ fc |= ALX_FC_ANEG;
+
+ ASSERT_RTNL();
+
+ /* restart auto-neg for auto-mode */
+ if (hw->adv_cfg & ADVERTISED_Autoneg) {
+ if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
+ reconfig_phy = true;
+ if (fc & hw->flowctrl & ALX_FC_ANEG &&
+ (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+ reconfig_phy = true;
+ }
+
+ if (reconfig_phy) {
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
+ return err;
+ }
+
+ /* flow control on mac */
+ if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+ alx_cfg_mac_flowcontrol(hw, fc);
+
+ hw->flowctrl = fc;
+
+ return 0;
+}
+
+static u32 alx_get_msglevel(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ return alx->msg_enable;
+}
+
+static void alx_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ alx->msg_enable = data;
+}
+
+static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+ wol->wolopts = 0;
+
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+ wol->wolopts |= WAKE_MAGIC;
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+
+ hw->sleep_ctrl = 0;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
+
+ device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
+
+ return 0;
+}
+
+static void alx_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+const struct ethtool_ops alx_ethtool_ops = {
+ .get_settings = alx_get_settings,
+ .set_settings = alx_set_settings,
+ .get_pauseparam = alx_get_pauseparam,
+ .set_pauseparam = alx_set_pauseparam,
+ .get_drvinfo = alx_get_drvinfo,
+ .get_msglevel = alx_get_msglevel,
+ .set_msglevel = alx_set_msglevel,
+ .get_wol = alx_get_wol,
+ .set_wol = alx_set_wol,
+ .get_link = ethtool_op_get_link,
+};
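(Note, not part of the driver: alx_get_settings() above and alx_start_mac() in hw.c below both rely on hw->link_speed folding speed and duplex into one integer, with the ethtool duplex constant (DUPLEX_HALF = 0, DUPLEX_FULL = 1) carried in the ones digit, hence the "% 10" splitting. A small stand-alone illustration of that encoding:

#include <stdio.h>

#define DUPLEX_HALF	0x00	/* same values as linux/ethtool.h */
#define DUPLEX_FULL	0x01

/* Illustrative only: the speed+duplex packing used by hw->link_speed. */
static int pack_link(int speed, int duplex)
{
	return speed + duplex;		/* e.g. 1000 + DUPLEX_FULL -> 1001 */
}

int main(void)
{
	int link = pack_link(1000, DUPLEX_FULL);

	printf("speed = %d Mb/s, duplex = %s\n",
	       link - link % 10,
	       (link % 10) == DUPLEX_FULL ? "full" : "half");
	return 0;
}
)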
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644
index 00000000000..220a16ad0e4
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
+#include "reg.h"
+#include "hw.h"
+
+static inline bool alx_is_rev_a(u8 rev)
+{
+ return rev == ALX_REV_A0 || rev == ALX_REV_A1;
+}
+
+static int alx_wait_mdio_idle(struct alx_hw *hw)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MDIO);
+ if (!(val & ALX_MDIO_BUSY))
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+ u16 reg, u16 *phy_data)
+{
+ u32 val, clk_sel;
+ int err;
+
+ *phy_data = 0;
+
+ /* use slow clock when it's in hibernation status */
+ clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+ ALX_MDIO_CLK_SEL_25MD4 :
+ ALX_MDIO_CLK_SEL_25MD128;
+
+ if (ext) {
+ val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+ reg << ALX_MDIO_EXTN_REG_SHIFT;
+ alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+ val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
+ ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
+ } else {
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ reg << ALX_MDIO_REG_SHIFT |
+ ALX_MDIO_START | ALX_MDIO_OP_READ;
+ }
+ alx_write_mem32(hw, ALX_MDIO, val);
+
+ err = alx_wait_mdio_idle(hw);
+ if (err)
+ return err;
+ val = alx_read_mem32(hw, ALX_MDIO);
+ *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
+ return 0;
+}
+
+static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+ u16 reg, u16 phy_data)
+{
+ u32 val, clk_sel;
+
+ /* use slow clock when it's in hibernation status */
+ clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+ ALX_MDIO_CLK_SEL_25MD4 :
+ ALX_MDIO_CLK_SEL_25MD128;
+
+ if (ext) {
+ val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+ reg << ALX_MDIO_EXTN_REG_SHIFT;
+ alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ phy_data << ALX_MDIO_DATA_SHIFT |
+ ALX_MDIO_START | ALX_MDIO_MODE_EXT;
+ } else {
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ reg << ALX_MDIO_REG_SHIFT |
+ phy_data << ALX_MDIO_DATA_SHIFT |
+ ALX_MDIO_START;
+ }
+ alx_write_mem32(hw, ALX_MDIO, val);
+
+ return alx_wait_mdio_idle(hw);
+}
+
+static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+ return alx_read_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+ return alx_write_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+ return alx_read_phy_core(hw, true, dev, reg, pdata);
+}
+
+static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+ return alx_write_phy_core(hw, true, dev, reg, data);
+}
+
+static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+ int err;
+
+ err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+ if (err)
+ return err;
+
+ return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
+}
+
+static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+ int err;
+
+ err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+ if (err)
+ return err;
+
+ return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
+}
+
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_reg(hw, reg, phy_data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_reg(hw, reg, phy_data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_ext(hw, dev, reg, pdata);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_ext(hw, dev, reg, data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_dbg(hw, reg, pdata);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_dbg(hw, reg, data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static u16 alx_get_phy_config(struct alx_hw *hw)
+{
+ u32 val;
+ u16 phy_val;
+
+ val = alx_read_mem32(hw, ALX_PHY_CTRL);
+ /* phy in reset */
+ if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
+ return ALX_DRV_PHY_UNKNOWN;
+
+ val = alx_read_mem32(hw, ALX_DRV);
+ val = ALX_GET_FIELD(val, ALX_DRV_PHY);
+ if (ALX_DRV_PHY_UNKNOWN == val)
+ return ALX_DRV_PHY_UNKNOWN;
+
+ alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
+ if (ALX_PHY_INITED == phy_val)
+ return val;
+
+ return ALX_DRV_PHY_UNKNOWN;
+}
+
+static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
+{
+ u32 read;
+ int i;
+
+ for (i = 0; i < ALX_SLD_MAX_TO; i++) {
+ read = alx_read_mem32(hw, reg);
+ if ((read & wait) == 0) {
+ if (val)
+ *val = read;
+ return true;
+ }
+ mdelay(1);
+ }
+
+ return false;
+}
+
+static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
+{
+ u32 mac0, mac1;
+
+ mac0 = alx_read_mem32(hw, ALX_STAD0);
+ mac1 = alx_read_mem32(hw, ALX_STAD1);
+
+ /* addr should be big-endian */
+ *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
+ *(__be16 *)addr = cpu_to_be16(mac1);
+
+ return is_valid_ether_addr(addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
+{
+ u32 val;
+
+ /* try to get it from register first */
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+
+ /* try to load from efuse */
+ if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
+ return -EIO;
+ alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
+ if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
+ return -EIO;
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+
+ /* try to load from flash/eeprom (if present) */
+ val = alx_read_mem32(hw, ALX_EFLD);
+ if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
+ if (!alx_wait_reg(hw, ALX_EFLD,
+ ALX_EFLD_STAT | ALX_EFLD_START, &val))
+ return -EIO;
+ alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
+ if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
+ return -EIO;
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+ }
+
+ return -EIO;
+}
+
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
+{
+ u32 val;
+
+ /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */
+ val = be32_to_cpu(*(__be32 *)(addr + 2));
+ alx_write_mem32(hw, ALX_STAD0, val);
+ val = be16_to_cpu(*(__be16 *)addr);
+ alx_write_mem32(hw, ALX_STAD1, val);
+}
+
+static void alx_enable_osc(struct alx_hw *hw)
+{
+ u32 val;
+
+ /* rising edge */
+ val = alx_read_mem32(hw, ALX_MISC);
+ alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+}
+
+static void alx_reset_osc(struct alx_hw *hw, u8 rev)
+{
+ u32 val, val2;
+
+ /* clear Internal OSC settings, switching OSC by hw itself */
+ val = alx_read_mem32(hw, ALX_MISC3);
+ alx_write_mem32(hw, ALX_MISC3,
+ (val & ~ALX_MISC3_25M_BY_SW) |
+ ALX_MISC3_25M_NOTO_INTNL);
+
+ /* 25M clk from chipset may be unstable 1s after de-assert of
+ * PERST; the driver needs to re-calibrate before entering Sleep for WoL
+ */
+ val = alx_read_mem32(hw, ALX_MISC);
+ if (rev >= ALX_REV_B0) {
+ /* restore over current protection def-val,
+ * this val could be reset by MAC-RST
+ */
+ ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
+ /* a 0->1 change will update the internal val of osc */
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ alx_write_mem32(hw, ALX_MISC, val);
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+ /* hw will automatically disable the OSC after calibration */
+ val2 = alx_read_mem32(hw, ALX_MSIC2);
+ val2 &= ~ALX_MSIC2_CALB_START;
+ alx_write_mem32(hw, ALX_MSIC2, val2);
+ alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
+ } else {
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ /* disable isolate for rev A devices */
+ if (alx_is_rev_a(rev))
+ val &= ~ALX_MISC_ISO_EN;
+
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+ alx_write_mem32(hw, ALX_MISC, val);
+ }
+
+ udelay(20);
+}
+
+static int alx_stop_mac(struct alx_hw *hw)
+{
+ u32 rxq, txq, val;
+ u16 i;
+
+ rxq = alx_read_mem32(hw, ALX_RXQ0);
+ alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
+ txq = alx_read_mem32(hw, ALX_TXQ0);
+ alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
+
+ udelay(40);
+
+ hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+ for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MAC_STS);
+ if (!(val & ALX_MAC_STS_IDLE))
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int alx_reset_mac(struct alx_hw *hw)
+{
+ u32 val, pmctrl;
+ int i, ret;
+ u8 rev;
+ bool a_cr;
+
+ pmctrl = 0;
+ rev = alx_hw_revision(hw);
+ a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
+
+ /* disable all interrupts, RXQ/TXQ */
+ alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
+ alx_write_mem32(hw, ALX_IMR, 0);
+ alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+
+ ret = alx_stop_mac(hw);
+ if (ret)
+ return ret;
+
+ /* mac reset workaround */
+ alx_write_mem32(hw, ALX_RFD_PIDX, 1);
+
+ /* dis l0s/l1 before mac reset */
+ if (a_cr) {
+ pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+ if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+ alx_write_mem32(hw, ALX_PMCTRL,
+ pmctrl & ~(ALX_PMCTRL_L1_EN |
+ ALX_PMCTRL_L0S_EN));
+ }
+
+ /* reset whole mac safely */
+ val = alx_read_mem32(hw, ALX_MASTER);
+ alx_write_mem32(hw, ALX_MASTER,
+ val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
+
+ /* make sure it's real idle */
+ udelay(10);
+ for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_RFD_PIDX);
+ if (val == 0)
+ break;
+ udelay(10);
+ }
+ for (; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MASTER);
+ if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
+ break;
+ udelay(10);
+ }
+ if (i == ALX_DMA_MAC_RST_TO)
+ return -EIO;
+ udelay(10);
+
+ if (a_cr) {
+ alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
+ /* restore l0s / l1 */
+ if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+ alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+ }
+
+ alx_reset_osc(hw, rev);
+
+ /* clear Internal OSC settings, switching OSC by hw itself,
+ * disable isolate for rev A devices
+ */
+ val = alx_read_mem32(hw, ALX_MISC3);
+ alx_write_mem32(hw, ALX_MISC3,
+ (val & ~ALX_MISC3_25M_BY_SW) |
+ ALX_MISC3_25M_NOTO_INTNL);
+ val = alx_read_mem32(hw, ALX_MISC);
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ if (alx_is_rev_a(rev))
+ val &= ~ALX_MISC_ISO_EN;
+ alx_write_mem32(hw, ALX_MISC, val);
+ udelay(20);
+
+ /* driver control speed/duplex, hash-alg */
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+ val = alx_read_mem32(hw, ALX_SERDES);
+ alx_write_mem32(hw, ALX_SERDES,
+ val | ALX_SERDES_MACCLK_SLWDWN |
+ ALX_SERDES_PHYCLK_SLWDWN);
+
+ return 0;
+}
+
+void alx_reset_phy(struct alx_hw *hw)
+{
+ int i;
+ u32 val;
+ u16 phy_val;
+
+ /* (DSP)reset PHY core */
+ val = alx_read_mem32(hw, ALX_PHY_CTRL);
+ val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
+ ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
+ ALX_PHY_CTRL_CLS);
+ val |= ALX_PHY_CTRL_RST_ANALOG;
+
+ val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
+ alx_write_mem32(hw, ALX_PHY_CTRL, val);
+ udelay(10);
+ alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
+
+ for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
+ udelay(10);
+
+ /* phy power saving & hib */
+ alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
+ ALX_SYSMODCTRL_IECHOADJ_DEF);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
+ ALX_VDRVBIAS_DEF);
+
+ /* EEE advertisement */
+ val = alx_read_mem32(hw, ALX_LPI_CTRL);
+ alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
+
+ /* phy power saving */
+ alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+ phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
+ /* rtl8139c, 120m issue */
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
+ ALX_MIIEXT_NLP78_120M_DEF);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
+ ALX_MIIEXT_S3DIG10_DEF);
+
+ if (hw->lnk_patch) {
+ /* Turn off half amplitude */
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+ phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
+ /* Turn off Green feature */
+ alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+ phy_val | ALX_GREENCFG2_BP_GREEN);
+ /* Turn off half Bias */
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+ phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
+ }
+
+ /* set phy interrupt mask */
+ alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
+}
+
+#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
+void alx_reset_pcie(struct alx_hw *hw)
+{
+ u8 rev = alx_hw_revision(hw);
+ u32 val;
+ u16 val16;
+
+ /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+ pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
+ if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
+ val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
+ }
+
+ /* clear WoL setting/status */
+ val = alx_read_mem32(hw, ALX_WOL0);
+ alx_write_mem32(hw, ALX_WOL0, 0);
+
+ val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+ alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
+
+ /* mask some pcie error bits */
+ val = alx_read_mem32(hw, ALX_UE_SVRT);
+ val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
+ alx_write_mem32(hw, ALX_UE_SVRT, val);
+
+ /* wol 25M & pclk */
+ val = alx_read_mem32(hw, ALX_MASTER);
+ if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
+ if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+ (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
+ alx_write_mem32(hw, ALX_MASTER,
+ val | ALX_MASTER_PCLKSEL_SRDS |
+ ALX_MASTER_WAKEN_25M);
+ } else {
+ if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+ (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
+ alx_write_mem32(hw, ALX_MASTER,
+ (val & ~ALX_MASTER_PCLKSEL_SRDS) |
+ ALX_MASTER_WAKEN_25M);
+ }
+
+ /* ASPM setting */
+ alx_enable_aspm(hw, true, true);
+
+ udelay(10);
+}
+
+void alx_start_mac(struct alx_hw *hw)
+{
+ u32 mac, txq, rxq;
+
+ rxq = alx_read_mem32(hw, ALX_RXQ0);
+ alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
+ txq = alx_read_mem32(hw, ALX_TXQ0);
+ alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
+
+ mac = hw->rx_ctrl;
+ if (hw->link_speed % 10 == DUPLEX_FULL)
+ mac |= ALX_MAC_CTRL_FULLD;
+ else
+ mac &= ~ALX_MAC_CTRL_FULLD;
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+ hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
+ ALX_MAC_CTRL_SPEED_10_100);
+ mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
+ hw->rx_ctrl = mac;
+ alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+}
+
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
+{
+ if (fc & ALX_FC_RX)
+ hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
+ else
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
+
+ if (fc & ALX_FC_TX)
+ hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
+ else
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+ u32 pmctrl;
+ u8 rev = alx_hw_revision(hw);
+
+ pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
+ ALX_PMCTRL_LCKDET_TIMER_DEF);
+ pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
+ ALX_PMCTRL_L1_CLKSW_EN |
+ ALX_PMCTRL_L1_SRDSRX_PWD;
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
+ pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
+ ALX_PMCTRL_L1_SRDSPLL_EN |
+ ALX_PMCTRL_L1_BUFSRX_EN |
+ ALX_PMCTRL_SADLY_EN |
+ ALX_PMCTRL_HOTRST_WTEN|
+ ALX_PMCTRL_L0S_EN |
+ ALX_PMCTRL_L1_EN |
+ ALX_PMCTRL_ASPM_FCEN |
+ ALX_PMCTRL_TXL1_AFTER_L0S |
+ ALX_PMCTRL_RXL1_AFTER_L0S);
+ if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
+ pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
+
+ if (l0s_en)
+ pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
+ if (l1_en)
+ pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
+
+ alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+}
+
+
+static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
+{
+ u32 cfg = 0;
+
+ if (ethadv_cfg & ADVERTISED_Autoneg) {
+ cfg |= ALX_DRV_PHY_AUTO;
+ if (ethadv_cfg & ADVERTISED_10baseT_Half)
+ cfg |= ALX_DRV_PHY_10;
+ if (ethadv_cfg & ADVERTISED_10baseT_Full)
+ cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_100baseT_Half)
+ cfg |= ALX_DRV_PHY_100;
+ if (ethadv_cfg & ADVERTISED_100baseT_Full)
+ cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_1000baseT_Half)
+ cfg |= ALX_DRV_PHY_1000;
+ if (ethadv_cfg & ADVERTISED_1000baseT_Full)
+ cfg |= ALX_DRV_PHY_1000 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_Pause)
+ cfg |= ADVERTISE_PAUSE_CAP;
+ if (ethadv_cfg & ADVERTISED_Asym_Pause)
+ cfg |= ADVERTISE_PAUSE_ASYM;
+ } else {
+ switch (ethadv_cfg) {
+ case ADVERTISED_10baseT_Half:
+ cfg |= ALX_DRV_PHY_10;
+ break;
+ case ADVERTISED_100baseT_Half:
+ cfg |= ALX_DRV_PHY_100;
+ break;
+ case ADVERTISED_10baseT_Full:
+ cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+ break;
+ case ADVERTISED_100baseT_Full:
+ cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+ break;
+ }
+ }
+
+ return cfg;
+}
+
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
+{
+ u16 adv, giga, cr;
+ u32 val;
+ int err = 0;
+
+ alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
+ val = alx_read_mem32(hw, ALX_DRV);
+ ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
+
+ if (ethadv & ADVERTISED_Autoneg) {
+ adv = ADVERTISE_CSMA;
+ adv |= ethtool_adv_to_mii_adv_t(ethadv);
+
+ if (flowctrl & ALX_FC_ANEG) {
+ if (flowctrl & ALX_FC_RX) {
+ adv |= ADVERTISE_PAUSE_CAP;
+ if (!(flowctrl & ALX_FC_TX))
+ adv |= ADVERTISE_PAUSE_ASYM;
+ } else if (flowctrl & ALX_FC_TX) {
+ adv |= ADVERTISE_PAUSE_ASYM;
+ }
+ }
+ giga = 0;
+ if (alx_hw_giga(hw))
+ giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
+
+ cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+
+ if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
+ alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
+ alx_write_phy_reg(hw, MII_BMCR, cr))
+ err = -EBUSY;
+ } else {
+ cr = BMCR_RESET;
+ if (ethadv == ADVERTISED_100baseT_Half ||
+ ethadv == ADVERTISED_100baseT_Full)
+ cr |= BMCR_SPEED100;
+ if (ethadv == ADVERTISED_10baseT_Full ||
+ ethadv == ADVERTISED_100baseT_Full)
+ cr |= BMCR_FULLDPLX;
+
+ err = alx_write_phy_reg(hw, MII_BMCR, cr);
+ }
+
+ if (!err) {
+ alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
+ val |= ethadv_to_hw_cfg(hw, ethadv);
+ }
+
+ alx_write_mem32(hw, ALX_DRV, val);
+
+ return err;
+}
+
+
+void alx_post_phy_link(struct alx_hw *hw)
+{
+ u16 phy_val, len, agc;
+ u8 revid = alx_hw_revision(hw);
+ bool adj_th = revid == ALX_REV_B0;
+ int speed;
+
+ if (hw->link_speed == SPEED_UNKNOWN)
+ speed = SPEED_UNKNOWN;
+ else
+ speed = hw->link_speed - hw->link_speed % 10;
+
+ if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
+ return;
+
+ /* 1000BT/AZ, wrong cable length */
+ if (speed != SPEED_UNKNOWN) {
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
+ &phy_val);
+ len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
+ agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
+
+ if ((speed == SPEED_1000 &&
+ (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
+ (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
+ (speed == SPEED_100 &&
+ (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
+ (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+ ALX_AZ_ANADECT_LONG);
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val | ALX_AFE_10BT_100M_TH);
+ } else {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+ ALX_AZ_ANADECT_DEF);
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
+ ALX_MIIEXT_AFE, &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val & ~ALX_AFE_10BT_100M_TH);
+ }
+
+ /* threshold adjust */
+ if (adj_th && hw->lnk_patch) {
+ if (speed == SPEED_100) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+ ALX_MSE16DB_UP);
+ } else if (speed == SPEED_1000) {
+ /*
+ * Giga link threshold, raise the noise
+ * tolerance by 50%
+ */
+ alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+ &phy_val);
+ ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+ ALX_MSE20DB_TH_HI);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+ phy_val);
+ }
+ }
+ } else {
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val & ~ALX_AFE_10BT_100M_TH);
+
+ if (adj_th && hw->lnk_patch) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+ ALX_MSE16DB_DOWN);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
+ ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+ ALX_MSE20DB_TH_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
+ }
+ }
+}
+
+
+/* NOTE:
+ * 1. the PHY link must be established before calling this function
+ * 2. the WoL options (pattern, magic, link, etc.) must be configured
+ *    before calling it.
+ */
+int alx_pre_suspend(struct alx_hw *hw, int speed)
+{
+ u32 master, mac, phy, val;
+ int err = 0;
+
+ master = alx_read_mem32(hw, ALX_MASTER);
+ master &= ~ALX_MASTER_PCLKSEL_SRDS;
+ mac = hw->rx_ctrl;
+ /* 10/100 half */
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
+ mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+
+ phy = alx_read_mem32(hw, ALX_PHY_CTRL);
+ phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
+ phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
+ ALX_PHY_CTRL_HIB_EN;
+
+ /* no wake-up events are enabled */
+ if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
+ err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+ if (err)
+ return err;
+ phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
+ } else {
+ if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
+ mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
+ if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
+ mac |= ALX_MAC_CTRL_TX_EN;
+ if (speed % 10 == DUPLEX_FULL)
+ mac |= ALX_MAC_CTRL_FULLD;
+ if (speed >= SPEED_1000)
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+ ALX_MAC_CTRL_SPEED_1000);
+ phy |= ALX_PHY_CTRL_DSPRST_OUT;
+ err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
+ ALX_MIIEXT_S3DIG10,
+ ALX_MIIEXT_S3DIG10_SL);
+ if (err)
+ return err;
+ }
+
+ alx_enable_osc(hw);
+ hw->rx_ctrl = mac;
+ alx_write_mem32(hw, ALX_MASTER, master);
+ alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+ alx_write_mem32(hw, ALX_PHY_CTRL, phy);
+
+ /* set val of PDLL D3PLLOFF */
+ val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+ val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
+ alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
+
+ return 0;
+}
+
+bool alx_phy_configured(struct alx_hw *hw)
+{
+ u32 cfg, hw_cfg;
+
+ cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
+ cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
+ hw_cfg = alx_get_phy_config(hw);
+
+ if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
+ return false;
+
+ return cfg == hw_cfg;
+}
+
+int alx_get_phy_link(struct alx_hw *hw, int *speed)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u16 bmsr, giga;
+ int err;
+
+ err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+ if (err)
+ return err;
+
+ err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+ if (err)
+ return err;
+
+ if (!(bmsr & BMSR_LSTATUS)) {
+ *speed = SPEED_UNKNOWN;
+ return 0;
+ }
+
+ /* speed/duplex result is saved in PHY Specific Status Register */
+ err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
+ if (err)
+ return err;
+
+ if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
+ goto wrong_speed;
+
+ switch (giga & ALX_GIGA_PSSR_SPEED) {
+ case ALX_GIGA_PSSR_1000MBS:
+ *speed = SPEED_1000;
+ break;
+ case ALX_GIGA_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case ALX_GIGA_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+ goto wrong_speed;
+ }
+
+ *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ return 1;
+
+wrong_speed:
+ dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
+ return -EINVAL;
+}
+
+int alx_clear_phy_intr(struct alx_hw *hw)
+{
+ u16 isr;
+
+ /* clear interrupt status by reading it */
+ return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
+}
+
+int alx_config_wol(struct alx_hw *hw)
+{
+ u32 wol = 0;
+ int err = 0;
+
+ /* turn on magic packet event */
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+ wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
+
+ /* turn on link up event */
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
+ wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
+ /* only a link-up event can wake the system */
+ err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
+ }
+ alx_write_mem32(hw, ALX_WOL0, wol);
+
+ return err;
+}
+
+void alx_disable_rss(struct alx_hw *hw)
+{
+ u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
+
+ ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
+ alx_write_mem32(hw, ALX_RXQ0, ctrl);
+}
+
+void alx_configure_basic(struct alx_hw *hw)
+{
+ u32 val, raw_mtu, max_payload;
+ u16 val16;
+ u8 chip_rev = alx_hw_revision(hw);
+
+ alx_set_macaddr(hw, hw->mac_addr);
+
+ alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
+
+ /* idle timeout to switch clk_125M */
+ if (chip_rev >= ALX_REV_B0)
+ alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
+ ALX_IDLE_DECISN_TIMER_DEF);
+
+ alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
+
+ val = alx_read_mem32(hw, ALX_MASTER);
+ val |= ALX_MASTER_IRQMOD2_EN |
+ ALX_MASTER_IRQMOD1_EN |
+ ALX_MASTER_SYSALVTIMER_EN;
+ alx_write_mem32(hw, ALX_MASTER, val);
+ alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
+ (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
+ /* interrupt re-trigger timeout */
+ alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
+ /* TPD threshold to trigger an interrupt */
+ alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
+ alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
+
+ raw_mtu = hw->mtu + ETH_HLEN;
+ alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
+ if (raw_mtu > ALX_MTU_JUMBO_TH)
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
+
+ if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
+ val = (raw_mtu + 8 + 7) >> 3;
+ else
+ val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
+ alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
+
+ max_payload = pcie_get_readrq(hw->pdev) >> 8;
+ /*
+ * if the BIOS has changed the default DMA max read request size,
+ * restore it to the default value
+ */
+ if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
+ pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
+
+ val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
+ ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
+ ALX_TXQ0_SUPT_IPOPT |
+ ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
+ alx_write_mem32(hw, ALX_TXQ0, val);
+ val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
+ ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
+ ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
+ ALX_HQTPD_BURST_EN;
+ alx_write_mem32(hw, ALX_HQTPD, val);
+
+ /* rxq, flow control */
+ val = alx_read_mem32(hw, ALX_SRAM5);
+ val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
+ if (val > ALX_SRAM_RXF_LEN_8K) {
+ val16 = ALX_MTU_STD_ALGN >> 3;
+ val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
+ } else {
+ val16 = ALX_MTU_STD_ALGN >> 3;
+ val = (val - ALX_MTU_STD_ALGN) >> 3;
+ }
+ alx_write_mem32(hw, ALX_RXQ2,
+ val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
+ val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
+ val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
+ ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
+ ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
+ ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
+ ALX_RXQ0_IPV6_PARSE_EN;
+
+ if (alx_hw_giga(hw))
+ ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
+ ALX_RXQ0_ASPM_THRESH_100M);
+
+ alx_write_mem32(hw, ALX_RXQ0, val);
+
+ val = alx_read_mem32(hw, ALX_DMA);
+ val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
+ ALX_DMA_RREQ_PRI_DATA |
+ max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
+ ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
+ ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
+ (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
+ alx_write_mem32(hw, ALX_DMA, val);
+
+ /* default multi-tx-q weights */
+ val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
+ 4 << ALX_WRR_PRI0_SHIFT |
+ 4 << ALX_WRR_PRI1_SHIFT |
+ 4 << ALX_WRR_PRI2_SHIFT |
+ 4 << ALX_WRR_PRI3_SHIFT;
+ alx_write_mem32(hw, ALX_WRR, val);
+}
+
+static inline u32 alx_speed_to_ethadv(int speed)
+{
+ switch (speed) {
+ case SPEED_1000 + DUPLEX_FULL:
+ return ADVERTISED_1000baseT_Full;
+ case SPEED_100 + DUPLEX_FULL:
+ return ADVERTISED_100baseT_Full;
+ case SPEED_100 + DUPLEX_HALF:
+ return ADVERTISED_100baseT_Half;
+ case SPEED_10 + DUPLEX_FULL:
+ return ADVERTISED_10baseT_Full;
+ case SPEED_10 + DUPLEX_HALF:
+ return ADVERTISED_10baseT_Half;
+ default:
+ return 0;
+ }
+}
+
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
+{
+ int i, err, spd;
+ u16 lpa;
+
+ err = alx_get_phy_link(hw, &spd);
+ if (err < 0)
+ return err;
+
+ if (spd == SPEED_UNKNOWN)
+ return 0;
+
+ err = alx_read_phy_reg(hw, MII_LPA, &lpa);
+ if (err)
+ return err;
+
+ if (!(lpa & LPA_LPACK)) {
+ *speed = spd;
+ return 0;
+ }
+
+ if (lpa & LPA_10FULL)
+ *speed = SPEED_10 + DUPLEX_FULL;
+ else if (lpa & LPA_10HALF)
+ *speed = SPEED_10 + DUPLEX_HALF;
+ else if (lpa & LPA_100FULL)
+ *speed = SPEED_100 + DUPLEX_FULL;
+ else
+ *speed = SPEED_100 + DUPLEX_HALF;
+
+ if (*speed != spd) {
+ err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+ if (err)
+ return err;
+ err = alx_setup_speed_duplex(hw,
+ alx_speed_to_ethadv(*speed) |
+ ADVERTISED_Autoneg,
+ ALX_FC_ANEG | ALX_FC_RX |
+ ALX_FC_TX);
+ if (err)
+ return err;
+
+ /* wait for linkup */
+ for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+ int speed2;
+
+ msleep(100);
+
+ err = alx_get_phy_link(hw, &speed2);
+ if (err < 0)
+ return err;
+ if (speed2 != SPEED_UNKNOWN)
+ break;
+ }
+ if (i == ALX_MAX_SETUP_LNK_CYCLE)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+bool alx_get_phy_info(struct alx_hw *hw)
+{
+ u16 devs1, devs2;
+
+ if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
+ alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
+ return false;
+
+ /* since we don't have a PMA/PMD status2 register, we can't
+ * use the mdio45_probe function for prtad and mmds.
+ * use fixed MMD3 to get the mmds.
+ */
+ if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
+ alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
+ return false;
+ hw->mdio.mmds = devs1 | devs2 << 16;
+
+ return true;
+}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644
index 00000000000..65e723d2172
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_HW_H_
+#define ALX_HW_H_
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/pci.h>
+#include "reg.h"
+
+/* Transmit Packet Descriptor, contains 4 32-bit words.
+ *
+ * 31 16 0
+ * +----------------+----------------+
+ * | vlan-tag | buf length |
+ * +----------------+----------------+
+ * | Word 1 |
+ * +----------------+----------------+
+ * | Word 2: buf addr lo |
+ * +----------------+----------------+
+ * | Word 3: buf addr hi |
+ * +----------------+----------------+
+ *
+ * Word 2 and 3 combine to form a 64-bit buffer address
+ *
+ * Word 1 has three forms, depending on the state of bits 8/12/13:
+ * if bit 8 == '1', the layout describes custom checksum offload only.
+ * if bit 8 == '0' && bit 12 == '1' && bit 13 == '1', the *FIRST* descriptor
+ * of the skb is special for LSO V2: Word 2 becomes the total skb length
+ * and Word 3 is meaningless.
+ * in all other cases, the layout describes a plain skb, ip/tcp/udp
+ * checksum offload, or LSO (TSO) offload.
+ *
+ * Here is the depiction:
+ *
+ * 0-+ 0-+
+ * 1 | 1 |
+ * 2 | 2 |
+ * 3 | Payload offset 3 | L4 header offset
+ * 4 | (7:0) 4 | (7:0)
+ * 5 | 5 |
+ * 6 | 6 |
+ * 7-+ 7-+
+ * 8 Custom csum enable = 1 8 Custom csum enable = 0
+ * 9 General IPv4 checksum 9 General IPv4 checksum
+ * 10 General TCP checksum 10 General TCP checksum
+ * 11 General UDP checksum 11 General UDP checksum
+ * 12 Large Send Segment enable 12 Large Send Segment enable
+ * 13 Large Send Segment type 13 Large Send Segment type
+ * 14 VLAN tagged 14 VLAN tagged
+ * 15 Insert VLAN tag 15 Insert VLAN tag
+ * 16 IPv4 packet 16 IPv4 packet
+ * 17 Ethernet frame type 17 Ethernet frame type
+ * 18-+ 18-+
+ * 19 | 19 |
+ * 20 | 20 |
+ * 21 | Custom csum offset 21 |
+ * 22 | (25:18) 22 |
+ * 23 | 23 | MSS (30:18)
+ * 24 | 24 |
+ * 25-+ 25 |
+ * 26-+ 26 |
+ * 27 | 27 |
+ * 28 | Reserved 28 |
+ * 29 | 29 |
+ * 30-+ 30-+
+ * 31 End of packet 31 End of packet
+ */
+struct alx_txd {
+ __le16 len;
+ __le16 vlan_tag;
+ __le32 word1;
+ union {
+ __le64 addr;
+ struct {
+ __le32 pkt_len;
+ __le32 resvd;
+ } l;
+ } adrl;
+} __packed;
+
+/* tpd word 1 */
+#define TPD_CXSUMSTART_MASK 0x00FF
+#define TPD_CXSUMSTART_SHIFT 0
+#define TPD_L4HDROFFSET_MASK 0x00FF
+#define TPD_L4HDROFFSET_SHIFT 0
+#define TPD_CXSUM_EN_MASK 0x0001
+#define TPD_CXSUM_EN_SHIFT 8
+#define TPD_IP_XSUM_MASK 0x0001
+#define TPD_IP_XSUM_SHIFT 9
+#define TPD_TCP_XSUM_MASK 0x0001
+#define TPD_TCP_XSUM_SHIFT 10
+#define TPD_UDP_XSUM_MASK 0x0001
+#define TPD_UDP_XSUM_SHIFT 11
+#define TPD_LSO_EN_MASK 0x0001
+#define TPD_LSO_EN_SHIFT 12
+#define TPD_LSO_V2_MASK 0x0001
+#define TPD_LSO_V2_SHIFT 13
+#define TPD_VLTAGGED_MASK 0x0001
+#define TPD_VLTAGGED_SHIFT 14
+#define TPD_INS_VLTAG_MASK 0x0001
+#define TPD_INS_VLTAG_SHIFT 15
+#define TPD_IPV4_MASK 0x0001
+#define TPD_IPV4_SHIFT 16
+#define TPD_ETHTYPE_MASK 0x0001
+#define TPD_ETHTYPE_SHIFT 17
+#define TPD_CXSUMOFFSET_MASK 0x00FF
+#define TPD_CXSUMOFFSET_SHIFT 18
+#define TPD_MSS_MASK 0x1FFF
+#define TPD_MSS_SHIFT 18
+#define TPD_EOP_MASK 0x0001
+#define TPD_EOP_SHIFT 31
+
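+/* extract a field from a descriptor word; the field name is passed with a
+ * trailing underscore so the macro can paste the matching _MASK/_SHIFT
+ * defines, e.g. DESC_GET(w, TPD_MSS_)
+ */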
+#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
+
+/* Receive Free Descriptor */
+struct alx_rfd {
+ __le64 addr; /* data buffer address, length is
+ * declared in register --- every
+ * buffer has the same size
+ */
+} __packed;
+
+/* Receive Return Descriptor, contains 4 32-bit words.
+ *
+ * 31 16 0
+ * +----------------+----------------+
+ * | Word 0 |
+ * +----------------+----------------+
+ * | Word 1: RSS Hash value |
+ * +----------------+----------------+
+ * | Word 2 |
+ * +----------------+----------------+
+ * | Word 3 |
+ * +----------------+----------------+
+ *
+ * Word 0 depiction & Word 2 depiction:
+ *
+ * 0--+ 0--+
+ * 1 | 1 |
+ * 2 | 2 |
+ * 3 | 3 |
+ * 4 | 4 |
+ * 5 | 5 |
+ * 6 | 6 |
+ * 7 | IP payload checksum 7 | VLAN tag
+ * 8 | (15:0) 8 | (15:0)
+ * 9 | 9 |
+ * 10 | 10 |
+ * 11 | 11 |
+ * 12 | 12 |
+ * 13 | 13 |
+ * 14 | 14 |
+ * 15-+ 15-+
+ * 16-+ 16-+
+ * 17 | Number of RFDs 17 |
+ * 18 | (19:16) 18 |
+ * 19-+ 19 | Protocol ID
+ * 20-+ 20 | (23:16)
+ * 21 | 21 |
+ * 22 | 22 |
+ * 23 | 23-+
+ * 24 | 24 | Reserved
+ * 25 | Start index of RFD-ring 25-+
+ * 26 | (31:20) 26 | RSS Q-num (27:25)
+ * 27 | 27-+
+ * 28 | 28-+
+ * 29 | 29 | RSS Hash algorithm
+ * 30 | 30 | (31:28)
+ * 31-+ 31-+
+ *
+ * Word 3 depiction:
+ *
+ * 0--+
+ * 1 |
+ * 2 |
+ * 3 |
+ * 4 |
+ * 5 |
+ * 6 |
+ * 7 | Packet length (includes FCS)
+ * 8 | (13:0)
+ * 9 |
+ * 10 |
+ * 11 |
+ * 12 |
+ * 13-+
+ * 14 L4 Header checksum error
+ * 15 IPv4 checksum error
+ * 16 VLAN tagged
+ * 17-+
+ * 18 | Protocol ID (19:17)
+ * 19-+
+ * 20 Receive error summary
+ * 21 FCS(CRC) error
+ * 22 Frame alignment error
+ * 23 Truncated packet
+ * 24 Runt packet
+ * 25 Incomplete packet due to insufficient rx-desc
+ * 26 Broadcast packet
+ * 27 Multicast packet
+ * 28 Ethernet type (EII or 802.3)
+ * 29 FIFO overflow
+ * 30 Length error (for 802.3, length field mismatch with actual len)
+ * 31 Updated, indicates to the driver that this RRD has been refreshed.
+ */
+struct alx_rrd {
+ __le32 word0;
+ __le32 rss_hash;
+ __le32 word2;
+ __le32 word3;
+} __packed;
+
+/* rrd word 0 */
+#define RRD_XSUM_MASK 0xFFFF
+#define RRD_XSUM_SHIFT 0
+#define RRD_NOR_MASK 0x000F
+#define RRD_NOR_SHIFT 16
+#define RRD_SI_MASK 0x0FFF
+#define RRD_SI_SHIFT 20
+
+/* rrd word 2 */
+#define RRD_VLTAG_MASK 0xFFFF
+#define RRD_VLTAG_SHIFT 0
+#define RRD_PID_MASK 0x00FF
+#define RRD_PID_SHIFT 16
+/* non-ip packet */
+#define RRD_PID_NONIP 0
+/* ipv4(only) */
+#define RRD_PID_IPV4 1
+/* tcp/ipv6 */
+#define RRD_PID_IPV6TCP 2
+/* tcp/ipv4 */
+#define RRD_PID_IPV4TCP 3
+/* udp/ipv6 */
+#define RRD_PID_IPV6UDP 4
+/* udp/ipv4 */
+#define RRD_PID_IPV4UDP 5
+/* ipv6(only) */
+#define RRD_PID_IPV6 6
+/* LLDP packet */
+#define RRD_PID_LLDP 7
+/* 1588 packet */
+#define RRD_PID_1588 8
+#define RRD_RSSQ_MASK 0x0007
+#define RRD_RSSQ_SHIFT 25
+#define RRD_RSSALG_MASK 0x000F
+#define RRD_RSSALG_SHIFT 28
+#define RRD_RSSALG_TCPV6 0x1
+#define RRD_RSSALG_IPV6 0x2
+#define RRD_RSSALG_TCPV4 0x4
+#define RRD_RSSALG_IPV4 0x8
+
+/* rrd word 3 */
+#define RRD_PKTLEN_MASK 0x3FFF
+#define RRD_PKTLEN_SHIFT 0
+#define RRD_ERR_L4_MASK 0x0001
+#define RRD_ERR_L4_SHIFT 14
+#define RRD_ERR_IPV4_MASK 0x0001
+#define RRD_ERR_IPV4_SHIFT 15
+#define RRD_VLTAGGED_MASK 0x0001
+#define RRD_VLTAGGED_SHIFT 16
+#define RRD_OLD_PID_MASK 0x0007
+#define RRD_OLD_PID_SHIFT 17
+#define RRD_ERR_RES_MASK 0x0001
+#define RRD_ERR_RES_SHIFT 20
+#define RRD_ERR_FCS_MASK 0x0001
+#define RRD_ERR_FCS_SHIFT 21
+#define RRD_ERR_FAE_MASK 0x0001
+#define RRD_ERR_FAE_SHIFT 22
+#define RRD_ERR_TRUNC_MASK 0x0001
+#define RRD_ERR_TRUNC_SHIFT 23
+#define RRD_ERR_RUNT_MASK 0x0001
+#define RRD_ERR_RUNT_SHIFT 24
+#define RRD_ERR_ICMP_MASK 0x0001
+#define RRD_ERR_ICMP_SHIFT 25
+#define RRD_BCAST_MASK 0x0001
+#define RRD_BCAST_SHIFT 26
+#define RRD_MCAST_MASK 0x0001
+#define RRD_MCAST_SHIFT 27
+#define RRD_ETHTYPE_MASK 0x0001
+#define RRD_ETHTYPE_SHIFT 28
+#define RRD_ERR_FIFOV_MASK 0x0001
+#define RRD_ERR_FIFOV_SHIFT 29
+#define RRD_ERR_LEN_MASK 0x0001
+#define RRD_ERR_LEN_SHIFT 30
+#define RRD_UPDATED_MASK 0x0001
+#define RRD_UPDATED_SHIFT 31
+
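+/* RRD/TPD fields are read with ALX_GET_FIELD(), e.g. the length of a
+ * received frame is ALX_GET_FIELD(le32_to_cpu(rrd->word3), RRD_PKTLEN)
+ */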
+
+#define ALX_MAX_SETUP_LNK_CYCLE 50
+
+/* for FlowControl */
+#define ALX_FC_RX 0x01
+#define ALX_FC_TX 0x02
+#define ALX_FC_ANEG 0x04
+
+/* for sleep control */
+#define ALX_SLEEP_WOL_PHY 0x00000001
+#define ALX_SLEEP_WOL_MAGIC 0x00000002
+#define ALX_SLEEP_CIFS 0x00000004
+#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
+ ALX_SLEEP_WOL_MAGIC | \
+ ALX_SLEEP_CIFS)
+
+/* for RSS hash type */
+#define ALX_RSS_HASH_TYPE_IPV4 0x1
+#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
+#define ALX_RSS_HASH_TYPE_IPV6 0x4
+#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
+#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
+ ALX_RSS_HASH_TYPE_IPV4_TCP | \
+ ALX_RSS_HASH_TYPE_IPV6 | \
+ ALX_RSS_HASH_TYPE_IPV6_TCP)
+#define ALX_DEF_RXBUF_SIZE 1536
+#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
+#define ALX_MAX_TSO_PKT_SIZE (7*1024)
+#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
+#define ALX_MIN_FRAME_SIZE 68
+#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+#define ALX_MAX_RX_QUEUES 8
+#define ALX_MAX_TX_QUEUES 4
+#define ALX_MAX_HANDLED_INTRS 5
+
+#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
+ ALX_ISR_DMAW | \
+ ALX_ISR_DMAR | \
+ ALX_ISR_SMB | \
+ ALX_ISR_MANU | \
+ ALX_ISR_TIMER)
+
+#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
+ ALX_ISR_DMAW | ALX_ISR_DMAR)
+
+#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
+ ALX_ISR_TXF_UR | \
+ ALX_ISR_RFD_UR)
+
+#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
+ ALX_ISR_TX_Q1 | \
+ ALX_ISR_TX_Q2 | \
+ ALX_ISR_TX_Q3 | \
+ ALX_ISR_RX_Q0 | \
+ ALX_ISR_RX_Q1 | \
+ ALX_ISR_RX_Q2 | \
+ ALX_ISR_RX_Q3 | \
+ ALX_ISR_RX_Q4 | \
+ ALX_ISR_RX_Q5 | \
+ ALX_ISR_RX_Q6 | \
+ ALX_ISR_RX_Q7)
+
+/* maximum interrupt vectors for msix */
+#define ALX_MAX_MSIX_INTRS 16
+
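+/* register/descriptor field accessors; every field FOO is described by a
+ * right-aligned FOO_MASK and a FOO_SHIFT define
+ */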
+#define ALX_GET_FIELD(_data, _field) \
+ (((_data) >> _field ## _SHIFT) & _field ## _MASK)
+
+#define ALX_SET_FIELD(_data, _field, _value) do { \
+ (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
+ (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
+ } while (0)
+
+struct alx_hw {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+
+ /* current & permanent mac addr */
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+
+ u16 mtu;
+ u16 imt;
+ u8 dma_chnl;
+ u8 max_dma_chnl;
+ /* TPD threshold to trigger an interrupt */
+ u32 ith_tpd;
+ u32 rx_ctrl;
+ u32 mc_hash[2];
+
+ u32 smb_timer;
+ /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
+ int link_speed;
+
+ /* auto-neg advertisement or force mode config */
+ u32 adv_cfg;
+ u8 flowctrl;
+
+ u32 sleep_ctrl;
+
+ spinlock_t mdio_lock;
+ struct mdio_if_info mdio;
+ u16 phy_id[2];
+
+ /* PHY link patch flag */
+ bool lnk_patch;
+};
+
+static inline int alx_hw_revision(struct alx_hw *hw)
+{
+ return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
+}
+
+static inline bool alx_hw_with_cr(struct alx_hw *hw)
+{
+ return hw->pdev->revision & 1;
+}
+
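+/* gigabit-capable parts are identified by the low bit of the PCI device ID */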
+static inline bool alx_hw_giga(struct alx_hw *hw)
+{
+ return hw->pdev->device & 1;
+}
+
+static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
+{
+ writeb(val, hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
+{
+ writew(val, hw->hw_addr + reg);
+}
+
+static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
+{
+ return readw(hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
+{
+ writel(val, hw->hw_addr + reg);
+}
+
+static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
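+/* flush posted writes by reading back from the device */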
+static inline void alx_post_write(struct alx_hw *hw)
+{
+ readl(hw->hw_addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
+void alx_reset_phy(struct alx_hw *hw);
+void alx_reset_pcie(struct alx_hw *hw);
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
+void alx_post_phy_link(struct alx_hw *hw);
+int alx_pre_suspend(struct alx_hw *hw, int speed);
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
+int alx_get_phy_link(struct alx_hw *hw, int *speed);
+int alx_clear_phy_intr(struct alx_hw *hw);
+int alx_config_wol(struct alx_hw *hw);
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
+void alx_start_mac(struct alx_hw *hw);
+int alx_reset_mac(struct alx_hw *hw);
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
+bool alx_phy_configured(struct alx_hw *hw);
+void alx_configure_basic(struct alx_hw *hw);
+void alx_disable_rss(struct alx_hw *hw);
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
+bool alx_get_phy_info(struct alx_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644
index 00000000000..418de8b1316
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -0,0 +1,1625 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/ip6_checksum.h>
+#include <linux/crc32.h>
+#include "alx.h"
+#include "hw.h"
+#include "reg.h"
+
+const char alx_drv_name[] = "alx";
+
+
+static void alx_free_txbuf(struct alx_priv *alx, int entry)
+{
+ struct alx_buffer *txb = &alx->txq.bufs[entry];
+
+ if (dma_unmap_len(txb, size)) {
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(txb, dma),
+ dma_unmap_len(txb, size),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(txb, size, 0);
+ }
+
+ if (txb->skb) {
+ dev_kfree_skb_any(txb->skb);
+ txb->skb = NULL;
+ }
+}
+
+static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct sk_buff *skb;
+ struct alx_buffer *cur_buf;
+ dma_addr_t dma;
+ u16 cur, next, count = 0;
+
+ next = cur = rxq->write_idx;
+ if (++next == alx->rx_ringsz)
+ next = 0;
+ cur_buf = &rxq->bufs[cur];
+
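+ /* refill empty slots until the ring would become full (next == read index) */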
+ while (!cur_buf->skb && next != rxq->read_idx) {
+ struct alx_rfd *rfd = &rxq->rfd[cur];
+
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+ if (!skb)
+ break;
+ dma = dma_map_single(&alx->hw.pdev->dev,
+ skb->data, alx->rxbuf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ /* Unfortunately, RX descriptor buffers must be 4-byte
+ * aligned, so we can't use IP alignment.
+ */
+ if (WARN_ON(dma & 3)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ cur_buf->skb = skb;
+ dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
+ dma_unmap_addr_set(cur_buf, dma, dma);
+ rfd->addr = cpu_to_le64(dma);
+
+ cur = next;
+ if (++next == alx->rx_ringsz)
+ next = 0;
+ cur_buf = &rxq->bufs[cur];
+ count++;
+ }
+
+ if (count) {
+ /* flush all updates before updating hardware */
+ wmb();
+ rxq->write_idx = cur;
+ alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
+ }
+
+ return count;
+}
+
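+/* number of free TPD slots; one slot is always left unused so a full ring
+ * can be distinguished from an empty one
+ */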
+static inline int alx_tpd_avail(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+
+ if (txq->write_idx >= txq->read_idx)
+ return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
+ return txq->read_idx - txq->write_idx - 1;
+}
+
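+/* reclaim completed TX buffers; returns true once the software read index
+ * has caught up with the hardware consumer index
+ */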
+static bool alx_clean_tx_irq(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ u16 hw_read_idx, sw_read_idx;
+ unsigned int total_bytes = 0, total_packets = 0;
+ int budget = ALX_DEFAULT_TX_WORK;
+
+ sw_read_idx = txq->read_idx;
+ hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
+
+ if (sw_read_idx != hw_read_idx) {
+ while (sw_read_idx != hw_read_idx && budget > 0) {
+ struct sk_buff *skb;
+
+ skb = txq->bufs[sw_read_idx].skb;
+ if (skb) {
+ total_bytes += skb->len;
+ total_packets++;
+ budget--;
+ }
+
+ alx_free_txbuf(alx, sw_read_idx);
+
+ if (++sw_read_idx == alx->tx_ringsz)
+ sw_read_idx = 0;
+ }
+ txq->read_idx = sw_read_idx;
+
+ netdev_completed_queue(alx->dev, total_packets, total_bytes);
+ }
+
+ if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
+ alx_tpd_avail(alx) > alx->tx_ringsz/4)
+ netif_wake_queue(alx->dev);
+
+ return sw_read_idx == hw_read_idx;
+}
+
+static void alx_schedule_link_check(struct alx_priv *alx)
+{
+ schedule_work(&alx->link_check_wk);
+}
+
+static void alx_schedule_reset(struct alx_priv *alx)
+{
+ schedule_work(&alx->reset_wk);
+}
+
+static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_rrd *rrd;
+ struct alx_buffer *rxb;
+ struct sk_buff *skb;
+ u16 length, rfd_cleaned = 0;
+
+ while (budget > 0) {
+ rrd = &rxq->rrd[rxq->rrd_read_idx];
+ if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+ break;
+ rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
+
+ if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+ RRD_SI) != rxq->read_idx ||
+ ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+ RRD_NOR) != 1) {
+ alx_schedule_reset(alx);
+ return 0;
+ }
+
+ rxb = &rxq->bufs[rxq->read_idx];
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(rxb, dma),
+ dma_unmap_len(rxb, size),
+ DMA_FROM_DEVICE);
+ dma_unmap_len_set(rxb, size, 0);
+ skb = rxb->skb;
+ rxb->skb = NULL;
+
+ if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
+ rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
+ rrd->word3 = 0;
+ dev_kfree_skb_any(skb);
+ goto next_pkt;
+ }
+
+ length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
+ RRD_PKTLEN) - ETH_FCS_LEN;
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, alx->dev);
+
+ skb_checksum_none_assert(skb);
+ if (alx->dev->features & NETIF_F_RXCSUM &&
+ !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
+ cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
+ switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
+ RRD_PID)) {
+ case RRD_PID_IPV6UDP:
+ case RRD_PID_IPV4UDP:
+ case RRD_PID_IPV4TCP:
+ case RRD_PID_IPV6TCP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+ }
+
+ napi_gro_receive(&alx->napi, skb);
+ budget--;
+
+next_pkt:
+ if (++rxq->read_idx == alx->rx_ringsz)
+ rxq->read_idx = 0;
+ if (++rxq->rrd_read_idx == alx->rx_ringsz)
+ rxq->rrd_read_idx = 0;
+
+ if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
+ rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
+ }
+
+ if (rfd_cleaned)
+ alx_refill_rx_ring(alx, GFP_ATOMIC);
+
+ return budget > 0;
+}
+
+static int alx_poll(struct napi_struct *napi, int budget)
+{
+ struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+ struct alx_hw *hw = &alx->hw;
+ bool complete = true;
+ unsigned long flags;
+
+ complete = alx_clean_tx_irq(alx) &&
+ alx_clean_rx_irq(alx, budget);
+
+ if (!complete)
+ return 1;
+
+ napi_complete(&alx->napi);
+
+ /* enable interrupt */
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+ alx_post_write(hw);
+
+ return 0;
+}
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+ struct alx_hw *hw = &alx->hw;
+ bool write_int_mask = false;
+
+ spin_lock(&alx->irq_lock);
+
+ /* ACK interrupt */
+ alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+ intr &= alx->int_mask;
+
+ if (intr & ALX_ISR_FATAL) {
+ netif_warn(alx, hw, alx->dev,
+ "fatal interrupt 0x%x, resetting\n", intr);
+ alx_schedule_reset(alx);
+ goto out;
+ }
+
+ if (intr & ALX_ISR_ALERT)
+ netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
+
+ if (intr & ALX_ISR_PHY) {
+ /* suppress further PHY interrupts: their source is internal
+ * to the PHY, and the main interrupt status can only be
+ * cleared once the PHY's internal status has been cleared.
+ */
+ alx->int_mask &= ~ALX_ISR_PHY;
+ write_int_mask = true;
+ alx_schedule_link_check(alx);
+ }
+
+ if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
+ napi_schedule(&alx->napi);
+ /* mask RX/TX interrupts, re-enable them when NAPI completes */
+ alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+ write_int_mask = true;
+ }
+
+ if (write_int_mask)
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+
+ alx_write_mem32(hw, ALX_ISR, 0);
+
+ out:
+ spin_unlock(&alx->irq_lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msi(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+
+ return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
+}
+
+static irqreturn_t alx_intr_legacy(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+ u32 intr;
+
+ intr = alx_read_mem32(hw, ALX_ISR);
+
+ if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
+ return IRQ_NONE;
+
+ return alx_intr_handle(alx, intr);
+}
+
+static void alx_init_ring_ptrs(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
+
+ alx->rxq.read_idx = 0;
+ alx->rxq.write_idx = 0;
+ alx->rxq.rrd_read_idx = 0;
+ alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
+ alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
+ alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
+ alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
+ alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
+ alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
+
+ alx->txq.read_idx = 0;
+ alx->txq.write_idx = 0;
+ alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
+ alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
+ alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
+
+ /* load these pointers into the chip */
+ alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
+}
+
+static void alx_free_txring_buf(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ int i;
+
+ if (!txq->bufs)
+ return;
+
+ for (i = 0; i < alx->tx_ringsz; i++)
+ alx_free_txbuf(alx, i);
+
+ memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
+ memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
+ txq->write_idx = 0;
+ txq->read_idx = 0;
+
+ netdev_reset_queue(alx->dev);
+}
+
+static void alx_free_rxring_buf(struct alx_priv *alx)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_buffer *cur_buf;
+ u16 i;
+
+ if (rxq == NULL)
+ return;
+
+ for (i = 0; i < alx->rx_ringsz; i++) {
+ cur_buf = rxq->bufs + i;
+ if (cur_buf->skb) {
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(cur_buf, dma),
+ dma_unmap_len(cur_buf, size),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(cur_buf->skb);
+ cur_buf->skb = NULL;
+ dma_unmap_len_set(cur_buf, size, 0);
+ dma_unmap_addr_set(cur_buf, dma, 0);
+ }
+ }
+
+ rxq->write_idx = 0;
+ rxq->read_idx = 0;
+ rxq->rrd_read_idx = 0;
+}
+
+static void alx_free_buffers(struct alx_priv *alx)
+{
+ alx_free_txring_buf(alx);
+ alx_free_rxring_buf(alx);
+}
+
+static int alx_reinit_rings(struct alx_priv *alx)
+{
+ alx_free_buffers(alx);
+
+ alx_init_ring_ptrs(alx);
+
+ if (!alx_refill_rx_ring(alx, GFP_KERNEL))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
+{
+ u32 crc32, bit, reg;
+
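+ /* the CRC's top bit selects the hash register, the next five bits the bit within it */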
+ crc32 = ether_crc(ETH_ALEN, addr);
+ reg = (crc32 >> 31) & 0x1;
+ bit = (crc32 >> 26) & 0x1F;
+
+ mc_hash[reg] |= BIT(bit);
+}
+
+static void __alx_set_rx_mode(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ struct netdev_hw_addr *ha;
+ u32 mc_hash[2] = {};
+
+ if (!(netdev->flags & IFF_ALLMULTI)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ alx_add_mc_addr(hw, ha->addr, mc_hash);
+
+ alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
+ alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
+ }
+
+ hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
+ if (netdev->flags & IFF_PROMISC)
+ hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
+ if (netdev->flags & IFF_ALLMULTI)
+ hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_set_rx_mode(struct net_device *netdev)
+{
+ __alx_set_rx_mode(netdev);
+}
+
+static int alx_set_mac_address(struct net_device *netdev, void *data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ struct sockaddr *addr = data;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netdev->addr_assign_type & NET_ADDR_RANDOM)
+ netdev->addr_assign_type ^= NET_ADDR_RANDOM;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+ alx_set_macaddr(hw, hw->mac_addr);
+
+ return 0;
+}
+
+static int alx_alloc_descriptors(struct alx_priv *alx)
+{
+ alx->txq.bufs = kcalloc(alx->tx_ringsz,
+ sizeof(struct alx_buffer),
+ GFP_KERNEL);
+ if (!alx->txq.bufs)
+ return -ENOMEM;
+
+ alx->rxq.bufs = kcalloc(alx->rx_ringsz,
+ sizeof(struct alx_buffer),
+ GFP_KERNEL);
+ if (!alx->rxq.bufs)
+ goto out_free;
+
+ /* physical tx/rx ring descriptors
+ *
+ * Allocate them as a single chunk because they must not cross a
+ * 4G boundary (hardware has a single register for high 32 bits
+ * of addresses only)
+ */
+ alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz +
+ sizeof(struct alx_rfd) * alx->rx_ringsz;
+ alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ &alx->descmem.dma,
+ GFP_KERNEL);
+ if (!alx->descmem.virt)
+ goto out_free;
+
+ alx->txq.tpd = (void *)alx->descmem.virt;
+ alx->txq.tpd_dma = alx->descmem.dma;
+
+ /* alignment requirement for next block */
+ BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
+
+ alx->rxq.rrd =
+ (void *)((u8 *)alx->descmem.virt +
+ sizeof(struct alx_txd) * alx->tx_ringsz);
+ alx->rxq.rrd_dma = alx->descmem.dma +
+ sizeof(struct alx_txd) * alx->tx_ringsz;
+
+ /* alignment requirement for next block */
+ BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
+
+ alx->rxq.rfd =
+ (void *)((u8 *)alx->descmem.virt +
+ sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz);
+ alx->rxq.rfd_dma = alx->descmem.dma +
+ sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz;
+
+ return 0;
+out_free:
+ kfree(alx->txq.bufs);
+ kfree(alx->rxq.bufs);
+ return -ENOMEM;
+}
+
+static int alx_alloc_rings(struct alx_priv *alx)
+{
+ int err;
+
+ err = alx_alloc_descriptors(alx);
+ if (err)
+ return err;
+
+ alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+
+ netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
+
+ alx_reinit_rings(alx);
+ return 0;
+}
+
+static void alx_free_rings(struct alx_priv *alx)
+{
+ netif_napi_del(&alx->napi);
+ alx_free_buffers(alx);
+
+ kfree(alx->txq.bufs);
+ kfree(alx->rxq.bufs);
+
+ dma_free_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ alx->descmem.virt,
+ alx->descmem.dma);
+}
+
+static void alx_config_vector_mapping(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
+ alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
+}
+
+static void alx_irq_enable(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ /* level-1 interrupt switch */
+ alx_write_mem32(hw, ALX_ISR, 0);
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ alx_post_write(hw);
+}
+
+static void alx_irq_disable(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+ alx_write_mem32(hw, ALX_IMR, 0);
+ alx_post_write(hw);
+
+ synchronize_irq(alx->hw.pdev->irq);
+}
+
+static int alx_request_irq(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+ u32 msi_ctrl;
+
+ msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
+
+ if (!pci_enable_msi(alx->hw.pdev)) {
+ alx->msi = true;
+
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
+ msi_ctrl | ALX_MSI_MASK_SEL_LINE);
+ err = request_irq(pdev->irq, alx_intr_msi, 0,
+ alx->dev->name, alx);
+ if (!err)
+ goto out;
+ /* fall back to legacy interrupt */
+ pci_disable_msi(alx->hw.pdev);
+ }
+
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
+ err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+ alx->dev->name, alx);
+out:
+ if (!err)
+ alx_config_vector_mapping(alx);
+ return err;
+}
+
+static void alx_free_irq(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+
+ free_irq(pdev->irq, alx);
+
+ if (alx->msi) {
+ pci_disable_msi(alx->hw.pdev);
+ alx->msi = false;
+ }
+}
+
+static int alx_identify_hw(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ int rev = alx_hw_revision(hw);
+
+ if (rev > ALX_REV_C0)
+ return -EINVAL;
+
+ hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
+
+ return 0;
+}
+
+static int alx_init_sw(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+
+ err = alx_identify_hw(alx);
+ if (err) {
+ dev_err(&pdev->dev, "unrecognized chip, aborting\n");
+ return err;
+ }
+
+ alx->hw.lnk_patch =
+ pdev->device == ALX_DEV_ID_AR8161 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
+ pdev->subsystem_device == 0x0091 &&
+ pdev->revision == 0;
+
+ hw->smb_timer = 400;
+ hw->mtu = alx->dev->mtu;
+ alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
+ alx->tx_ringsz = 256;
+ alx->rx_ringsz = 512;
+ hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
+ hw->imt = 200;
+ alx->int_mask = ALX_ISR_MISC;
+ hw->dma_chnl = hw->max_dma_chnl;
+ hw->ith_tpd = alx->tx_ringsz / 3;
+ hw->link_speed = SPEED_UNKNOWN;
+ hw->adv_cfg = ADVERTISED_Autoneg |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_1000baseT_Full;
+ hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
+
+ hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
+ ALX_MAC_CTRL_MHASH_ALG_HI5B |
+ ALX_MAC_CTRL_BRD_EN |
+ ALX_MAC_CTRL_PCRCE |
+ ALX_MAC_CTRL_CRCE |
+ ALX_MAC_CTRL_RXFC_EN |
+ ALX_MAC_CTRL_TXFC_EN |
+ 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
+
+ return err;
+}
+
+
+static netdev_features_t alx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return features;
+}
+
+static void alx_netif_stop(struct alx_priv *alx)
+{
+ alx->dev->trans_start = jiffies;
+ if (netif_carrier_ok(alx->dev)) {
+ netif_carrier_off(alx->dev);
+ netif_tx_disable(alx->dev);
+ napi_disable(&alx->napi);
+ }
+}
+
+static void alx_halt(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_netif_stop(alx);
+ hw->link_speed = SPEED_UNKNOWN;
+
+ alx_reset_mac(hw);
+
+ /* disable l0s/l1 */
+ alx_enable_aspm(hw, false, false);
+ alx_irq_disable(alx);
+ alx_free_buffers(alx);
+}
+
+static void alx_configure(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_configure_basic(hw);
+ alx_disable_rss(hw);
+ __alx_set_rx_mode(alx->dev);
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_activate(struct alx_priv *alx)
+{
+ /* hardware settings were lost, restore them */
+ alx_reinit_rings(alx);
+ alx_configure(alx);
+
+ /* clear old interrupts */
+ alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+ alx_irq_enable(alx);
+
+ alx_schedule_link_check(alx);
+}
+
+static void alx_reinit(struct alx_priv *alx)
+{
+ ASSERT_RTNL();
+
+ alx_halt(alx);
+ alx_activate(alx);
+}
+
+static int alx_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((max_frame < ALX_MIN_FRAME_SIZE) ||
+ (max_frame > ALX_MAX_FRAME_SIZE))
+ return -EINVAL;
+
+ if (netdev->mtu == mtu)
+ return 0;
+
+ netdev->mtu = mtu;
+ alx->hw.mtu = mtu;
+ alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
+ ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
+ netdev_update_features(netdev);
+ if (netif_running(netdev))
+ alx_reinit(alx);
+ return 0;
+}
+
+static void alx_netif_start(struct alx_priv *alx)
+{
+ netif_tx_wake_all_queues(alx->dev);
+ napi_enable(&alx->napi);
+ netif_carrier_on(alx->dev);
+}
+
+static int __alx_open(struct alx_priv *alx, bool resume)
+{
+ int err;
+
+ if (!resume)
+ netif_carrier_off(alx->dev);
+
+ err = alx_alloc_rings(alx);
+ if (err)
+ return err;
+
+ alx_configure(alx);
+
+ err = alx_request_irq(alx);
+ if (err)
+ goto out_free_rings;
+
+ /* clear old interrupts */
+ alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+ alx_irq_enable(alx);
+
+ if (!resume)
+ netif_tx_start_all_queues(alx->dev);
+
+ alx_schedule_link_check(alx);
+ return 0;
+
+out_free_rings:
+ alx_free_rings(alx);
+ return err;
+}
+
+static void __alx_stop(struct alx_priv *alx)
+{
+ alx_halt(alx);
+ alx_free_irq(alx);
+ alx_free_rings(alx);
+}
+
+static const char *alx_speed_desc(u16 speed)
+{
+ switch (speed) {
+ case SPEED_1000 + DUPLEX_FULL:
+ return "1 Gbps Full";
+ case SPEED_100 + DUPLEX_FULL:
+ return "100 Mbps Full";
+ case SPEED_100 + DUPLEX_HALF:
+ return "100 Mbps Half";
+ case SPEED_10 + DUPLEX_FULL:
+ return "10 Mbps Full";
+ case SPEED_10 + DUPLEX_HALF:
+ return "10 Mbps Half";
+ default:
+ return "Unknown speed";
+ }
+}
+
+static void alx_check_link(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ unsigned long flags;
+ int speed, old_speed;
+ int err;
+
+ /* clear PHY internal interrupt status, otherwise the main
+ * interrupt status will be asserted forever
+ */
+ alx_clear_phy_intr(hw);
+
+ err = alx_get_phy_link(hw, &speed);
+ if (err < 0)
+ goto reset;
+
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_PHY;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+ old_speed = hw->link_speed;
+
+ if (old_speed == speed)
+ return;
+ hw->link_speed = speed;
+
+ if (speed != SPEED_UNKNOWN) {
+ netif_info(alx, link, alx->dev,
+ "NIC Up: %s\n", alx_speed_desc(speed));
+ alx_post_phy_link(hw);
+ alx_enable_aspm(hw, true, true);
+ alx_start_mac(hw);
+
+ if (old_speed == SPEED_UNKNOWN)
+ alx_netif_start(alx);
+ } else {
+ /* link is now down */
+ alx_netif_stop(alx);
+ netif_info(alx, link, alx->dev, "Link Down\n");
+ err = alx_reset_mac(hw);
+ if (err)
+ goto reset;
+ alx_irq_disable(alx);
+
+ /* MAC reset causes all HW settings to be lost, restore all */
+ err = alx_reinit_rings(alx);
+ if (err)
+ goto reset;
+ alx_configure(alx);
+ alx_enable_aspm(hw, false, true);
+ alx_post_phy_link(hw);
+ alx_irq_enable(alx);
+ }
+
+ return;
+
+reset:
+ alx_schedule_reset(alx);
+}
+
+static int alx_open(struct net_device *netdev)
+{
+ return __alx_open(netdev_priv(netdev), false);
+}
+
+static int alx_stop(struct net_device *netdev)
+{
+ __alx_stop(netdev_priv(netdev));
+ return 0;
+}
+
+static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ struct alx_hw *hw = &alx->hw;
+ int err, speed;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __alx_stop(alx);
+
+#ifdef CONFIG_PM_SLEEP
+ err = pci_save_state(pdev);
+ if (err)
+ return err;
+#endif
+
+ err = alx_select_powersaving_speed(hw, &speed);
+ if (err)
+ return err;
+ err = alx_clear_phy_intr(hw);
+ if (err)
+ return err;
+ err = alx_pre_suspend(hw, speed);
+ if (err)
+ return err;
+ err = alx_config_wol(hw);
+ if (err)
+ return err;
+
+ *wol_en = false;
+ if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
+ netif_info(alx, wol, netdev,
+ "wol: ctrl=%X, speed=%X\n",
+ hw->sleep_ctrl, speed);
+ device_set_wakeup_enable(&pdev->dev, true);
+ *wol_en = true;
+ }
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+static void alx_shutdown(struct pci_dev *pdev)
+{
+ int err;
+ bool wol_en;
+
+ err = __alx_shutdown(pdev, &wol_en);
+ if (!err) {
+ pci_wake_from_d3(pdev, wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ } else {
+ dev_err(&pdev->dev, "shutdown fail %d\n", err);
+ }
+}
+
+static void alx_link_check(struct work_struct *work)
+{
+ struct alx_priv *alx;
+
+ alx = container_of(work, struct alx_priv, link_check_wk);
+
+ rtnl_lock();
+ alx_check_link(alx);
+ rtnl_unlock();
+}
+
+static void alx_reset(struct work_struct *work)
+{
+ struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
+
+ rtnl_lock();
+ alx_reinit(alx);
+ rtnl_unlock();
+}
+
+static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
+{
+ u8 cso, css;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ cso = skb_checksum_start_offset(skb);
+ if (cso & 1)
+ return -EINVAL;
+
+ css = cso + skb->csum_offset;
+ first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
+ first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
+ first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
+
+ return 0;
+}
+
+static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ struct alx_txd *tpd, *first_tpd;
+ dma_addr_t dma;
+ int maplen, f, first_idx = txq->write_idx;
+
+ first_tpd = &txq->tpd[txq->write_idx];
+ tpd = first_tpd;
+
+ maplen = skb_headlen(skb);
+ dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+ goto err_dma;
+
+ dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+ dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+ tpd->adrl.addr = cpu_to_le64(dma);
+ tpd->len = cpu_to_le16(maplen);
+
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+
+ if (++txq->write_idx == alx->tx_ringsz)
+ txq->write_idx = 0;
+ tpd = &txq->tpd[txq->write_idx];
+
+ tpd->word1 = first_tpd->word1;
+
+ maplen = skb_frag_size(frag);
+ dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
+ maplen, DMA_TO_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+ goto err_dma;
+ dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+ dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+ tpd->adrl.addr = cpu_to_le64(dma);
+ tpd->len = cpu_to_le16(maplen);
+ }
+
+ /* last TPD, set EOP flag and store skb */
+ tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
+ txq->bufs[txq->write_idx].skb = skb;
+
+ if (++txq->write_idx == alx->tx_ringsz)
+ txq->write_idx = 0;
+
+ return 0;
+
+err_dma:
+ f = first_idx;
+ while (f != txq->write_idx) {
+ alx_free_txbuf(alx, f);
+ if (++f == alx->tx_ringsz)
+ f = 0;
+ }
+ return -ENOMEM;
+}
+
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_tx_queue *txq = &alx->txq;
+ struct alx_txd *first;
+ int tpdreq = skb_shinfo(skb)->nr_frags + 1;
+
+ if (alx_tpd_avail(alx) < tpdreq) {
+ netif_stop_queue(alx->dev);
+ goto drop;
+ }
+
+ first = &txq->tpd[txq->write_idx];
+ memset(first, 0, sizeof(*first));
+
+ if (alx_tx_csum(skb, first))
+ goto drop;
+
+ if (alx_map_tx_skb(alx, skb) < 0)
+ goto drop;
+
+ netdev_sent_queue(alx->dev, skb->len);
+
+ /* flush updates before updating hardware */
+ wmb();
+ alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
+
+ if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
+ netif_stop_queue(alx->dev);
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void alx_tx_timeout(struct net_device *dev)
+{
+ struct alx_priv *alx = netdev_priv(dev);
+
+ alx_schedule_reset(alx);
+}
+
+static int alx_mdio_read(struct net_device *netdev,
+ int prtad, int devad, u16 addr)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ u16 val;
+ int err;
+
+ if (prtad != hw->mdio.prtad)
+ return -EINVAL;
+
+ if (devad == MDIO_DEVAD_NONE)
+ err = alx_read_phy_reg(hw, addr, &val);
+ else
+ err = alx_read_phy_ext(hw, devad, addr, &val);
+
+ if (err)
+ return err;
+ return val;
+}
+
+static int alx_mdio_write(struct net_device *netdev,
+ int prtad, int devad, u16 addr, u16 val)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (prtad != hw->mdio.prtad)
+ return -EINVAL;
+
+ if (devad == MDIO_DEVAD_NONE)
+ return alx_write_phy_reg(hw, addr, val);
+
+ return alx_write_phy_ext(hw, devad, addr, val);
+}
+
+static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void alx_poll_controller(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ if (alx->msi)
+ alx_intr_msi(0, alx);
+ else
+ alx_intr_legacy(0, alx);
+}
+#endif
+
+static const struct net_device_ops alx_netdev_ops = {
+ .ndo_open = alx_open,
+ .ndo_stop = alx_stop,
+ .ndo_start_xmit = alx_start_xmit,
+ .ndo_set_rx_mode = alx_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = alx_set_mac_address,
+ .ndo_change_mtu = alx_change_mtu,
+ .ndo_do_ioctl = alx_ioctl,
+ .ndo_tx_timeout = alx_tx_timeout,
+ .ndo_fix_features = alx_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = alx_poll_controller,
+#endif
+};
+
+static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct alx_priv *alx;
+ struct alx_hw *hw;
+ bool phy_configured;
+ int bars, pm_cap, err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ /* The alx chip can DMA to 64-bit addresses, but it uses a single
+ * shared register for the high 32 bits, so only a single, aligned,
+ * 4 GB physical address range can be used for descriptors.
+ */
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA config, aborting\n");
+ goto out_pci_disable;
+ }
+ }
+ }
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed(bars:%d)\n", bars);
+ goto out_pci_disable;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm_cap == 0) {
+ dev_err(&pdev->dev,
+ "Can't find power management capability, aborting\n");
+ err = -EIO;
+ goto out_pci_release;
+ }
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ goto out_pci_release;
+
+ netdev = alloc_etherdev(sizeof(*alx));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_pci_release;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ alx = netdev_priv(netdev);
+ alx->dev = netdev;
+ alx->hw.pdev = pdev;
+ alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
+ NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
+ hw = &alx->hw;
+ pci_set_drvdata(pdev, alx);
+
+ hw->hw_addr = pci_ioremap_bar(pdev, 0);
+ if (!hw->hw_addr) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -EIO;
+ goto out_free_netdev;
+ }
+
+ netdev->netdev_ops = &alx_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+ netdev->irq = pdev->irq;
+ netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
+
+ if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+
+ err = alx_init_sw(alx);
+ if (err) {
+ dev_err(&pdev->dev, "net device private data init failed\n");
+ goto out_unmap;
+ }
+
+ alx_reset_pcie(hw);
+
+ phy_configured = alx_phy_configured(hw);
+
+ if (!phy_configured)
+ alx_reset_phy(hw);
+
+ err = alx_reset_mac(hw);
+ if (err) {
+ dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
+ goto out_unmap;
+ }
+
+ /* setup link to put it in a known good starting state */
+ if (!phy_configured) {
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to configure PHY speed/duplex (err=%d)\n",
+ err);
+ goto out_unmap;
+ }
+ }
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+
+ if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
+ dev_warn(&pdev->dev,
+ "Invalid permanent address programmed, using random one\n");
+ eth_hw_addr_random(netdev);
+ memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
+ }
+
+ memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
+ memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+ memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
+
+ hw->mdio.prtad = 0;
+ hw->mdio.mmds = 0;
+ hw->mdio.dev = netdev;
+ hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
+ MDIO_SUPPORTS_C22 |
+ MDIO_EMULATE_C22;
+ hw->mdio.mdio_read = alx_mdio_read;
+ hw->mdio.mdio_write = alx_mdio_write;
+
+ if (!alx_get_phy_info(hw)) {
+ dev_err(&pdev->dev, "failed to identify PHY\n");
+ err = -EIO;
+ goto out_unmap;
+ }
+
+ INIT_WORK(&alx->link_check_wk, alx_link_check);
+ INIT_WORK(&alx->reset_wk, alx_reset);
+ spin_lock_init(&alx->hw.mdio_lock);
+ spin_lock_init(&alx->irq_lock);
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "register netdevice failed\n");
+ goto out_unmap;
+ }
+
+ device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
+
+ netdev_info(netdev,
+ "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
+ netdev->dev_addr);
+
+ return 0;
+
+out_unmap:
+ iounmap(hw->hw_addr);
+out_free_netdev:
+ free_netdev(netdev);
+out_pci_release:
+ pci_release_selected_regions(pdev, bars);
+out_pci_disable:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void alx_remove(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_hw *hw = &alx->hw;
+
+ cancel_work_sync(&alx->link_check_wk);
+ cancel_work_sync(&alx->reset_wk);
+
+ /* restore permanent mac address */
+ alx_set_macaddr(hw, hw->perm_addr);
+
+ unregister_netdev(alx->dev);
+ iounmap(hw->hw_addr);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(alx->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int alx_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int err;
+ bool wol_en;
+
+ err = __alx_shutdown(pdev, &wol_en);
+ if (err) {
+ dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
+ return err;
+ }
+
+ if (wol_en) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
+static int alx_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ hw->link_speed = SPEED_UNKNOWN;
+ alx->int_mask = ALX_ISR_MISC;
+
+ alx_reset_pcie(hw);
+ alx_reset_phy(hw);
+
+ err = alx_reset_mac(hw);
+ if (err) {
+ netif_err(alx, hw, alx->dev,
+ "resume:reset_mac fail %d\n", err);
+ return -EIO;
+ }
+
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+ if (err) {
+ netif_err(alx, hw, alx->dev,
+ "resume:setup_speed_duplex fail %d\n", err);
+ return -EIO;
+ }
+
+ if (netif_running(netdev)) {
+ err = __alx_open(alx, true);
+ if (err)
+ return err;
+ }
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+#endif
+
+static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
+
+ dev_info(&pdev->dev, "pci error detected\n");
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ alx_halt(alx);
+ }
+
+ if (state == pci_channel_io_perm_failure)
+ rc = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_hw *hw = &alx->hw;
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+ dev_info(&pdev->dev, "pci error slot reset\n");
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
+ goto out;
+ }
+
+ pci_set_master(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ alx_reset_pcie(hw);
+ if (!alx_reset_mac(hw))
+ rc = PCI_ERS_RESULT_RECOVERED;
+out:
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+static void alx_pci_error_resume(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+
+ dev_info(&pdev->dev, "pci error resume\n");
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ alx_activate(alx);
+ netif_device_attach(netdev);
+ }
+
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers alx_err_handlers = {
+ .error_detected = alx_pci_error_detected,
+ .slot_reset = alx_pci_error_slot_reset,
+ .resume = alx_pci_error_resume,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+#define ALX_PM_OPS (&alx_pm_ops)
+#else
+#define ALX_PM_OPS NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
+ {}
+};
+
+static struct pci_driver alx_driver = {
+ .name = alx_drv_name,
+ .id_table = alx_pci_tbl,
+ .probe = alx_probe,
+ .remove = alx_remove,
+ .shutdown = alx_shutdown,
+ .err_handler = &alx_err_handlers,
+ .driver.pm = ALX_PM_OPS,
+};
+
+module_pci_driver(alx_driver);
+MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION(
+ "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
+MODULE_LICENSE("GPL");
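The TX path in the file above (alx_map_tx_skb() and alx_start_xmit()) advances a software write index around a fixed-size descriptor ring and stops the queue once fewer than one eighth of the descriptors remain free. Below is a minimal, self-contained sketch of just that index arithmetic; it is not part of the driver, the ring size and names are illustrative, and alx_tpd_avail() is assumed to compute the free count in the usual "leave one slot empty" way.

#include <stdio.h>

#define RING_SZ 256   /* illustrative ring size, not the driver's default */

struct ring {
	int write_idx;   /* next descriptor the software will fill */
	int read_idx;    /* next descriptor the hardware will consume */
};

/* Free descriptors; one slot is always kept unused so that the full and
 * empty states of the ring remain distinguishable. */
static int tpd_avail(const struct ring *r)
{
	if (r->write_idx >= r->read_idx)
		return RING_SZ + r->read_idx - r->write_idx - 1;
	return r->read_idx - r->write_idx - 1;
}

/* Consume 'need' descriptors for one packet, wrapping at the ring end
 * exactly the way the driver's ++write_idx == ringsz check does. */
static int post_packet(struct ring *r, int need)
{
	int i;

	if (tpd_avail(r) < need)
		return -1;                      /* caller would stop the queue */
	for (i = 0; i < need; i++)
		if (++r->write_idx == RING_SZ)
			r->write_idx = 0;
	return 0;
}

int main(void)
{
	struct ring r = { .write_idx = 0, .read_idx = 0 };

	post_packet(&r, 3);
	printf("avail=%d stop=%s\n", tpd_avail(&r),
	       tpd_avail(&r) < RING_SZ / 8 ? "yes" : "no");
	return 0;
}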
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644
index 00000000000..e4358c98bc4
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -0,0 +1,810 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_REG_H
+#define ALX_REG_H
+
+#define ALX_DEV_ID_AR8161 0x1091
+#define ALX_DEV_ID_E2200 0xe091
+#define ALX_DEV_ID_AR8162 0x1090
+#define ALX_DEV_ID_AR8171 0x10A1
+#define ALX_DEV_ID_AR8172 0x10A0
+
+/* rev definition,
+ * bit(0): with xD support
+ * bit(1): with Card Reader function
+ * bit(7:2): real revision
+ */
+#define ALX_PCI_REVID_SHIFT 3
+#define ALX_REV_A0 0
+#define ALX_REV_A1 1
+#define ALX_REV_B0 2
+#define ALX_REV_C0 3
+
+#define ALX_DEV_CTRL 0x0060
+#define ALX_DEV_CTRL_MAXRRS_MIN 2
+
+#define ALX_MSIX_MASK 0x0090
+
+#define ALX_UE_SVRT 0x010C
+#define ALX_UE_SVRT_FCPROTERR BIT(13)
+#define ALX_UE_SVRT_DLPROTERR BIT(4)
+
+/* eeprom & flash load register */
+#define ALX_EFLD 0x0204
+#define ALX_EFLD_F_EXIST BIT(10)
+#define ALX_EFLD_E_EXIST BIT(9)
+#define ALX_EFLD_STAT BIT(5)
+#define ALX_EFLD_START BIT(0)
+
+/* eFuse load register */
+#define ALX_SLD 0x0218
+#define ALX_SLD_STAT BIT(12)
+#define ALX_SLD_START BIT(11)
+#define ALX_SLD_MAX_TO 100
+
+#define ALX_PDLL_TRNS1 0x1104
+#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
+
+#define ALX_PMCTRL 0x12F8
+#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
+/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */
+#define ALX_PMCTRL_ASPM_FCEN BIT(30)
+#define ALX_PMCTRL_SADLY_EN BIT(29)
+#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
+#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
+#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
+/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
+#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
+#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
+#define ALX_PMCTRL_L1REG_TO_DEF 0xF
+#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
+#define ALX_PMCTRL_L1_TIMER_MASK 0x7
+#define ALX_PMCTRL_L1_TIMER_SHIFT 16
+#define ALX_PMCTRL_L1_TIMER_16US 4
+#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
+/* bit13: enable pcie clk switch in L1 state */
+#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
+#define ALX_PMCTRL_L0S_EN BIT(12)
+#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
+#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
+/* bit6: power down serdes RX */
+#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
+#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
+#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
+#define ALX_PMCTRL_L1_EN BIT(3)
+
+/*******************************************************/
+/* following registers are mapped only to memory space */
+/*******************************************************/
+
+#define ALX_MASTER 0x1400
+/* bit12: 1:always select pclk from serdes, not sw to 25M */
+#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
+/* bit11: irq moderation for rx */
+#define ALX_MASTER_IRQMOD2_EN BIT(11)
+/* bit10: irq moderation for tx/rx */
+#define ALX_MASTER_IRQMOD1_EN BIT(10)
+#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
+#define ALX_MASTER_OOB_DIS BIT(6)
+/* bit5: wakeup without pcie clk */
+#define ALX_MASTER_WAKEN_25M BIT(5)
+/* bit0: MAC & DMA reset */
+#define ALX_MASTER_DMA_MAC_RST BIT(0)
+#define ALX_DMA_MAC_RST_TO 50
+
+#define ALX_IRQ_MODU_TIMER 0x1408
+#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
+#define ALX_IRQ_MODU_TIMER1_SHIFT 0
+
+#define ALX_PHY_CTRL 0x140C
+#define ALX_PHY_CTRL_100AB_EN BIT(17)
+/* bit14: affect MAC & PHY, go to low power sts */
+#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
+/* bit13: 1:pll always ON, 0:can switch in lpw */
+#define ALX_PHY_CTRL_PLL_ON BIT(13)
+#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
+#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
+#define ALX_PHY_CTRL_HIB_EN BIT(10)
+#define ALX_PHY_CTRL_IDDQ BIT(7)
+#define ALX_PHY_CTRL_GATE_25M BIT(5)
+#define ALX_PHY_CTRL_LED_MODE BIT(2)
+/* bit0: out of dsp RST state */
+#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
+#define ALX_PHY_CTRL_DSPRST_TO 80
+#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
+ ALX_PHY_CTRL_100AB_EN | \
+ ALX_PHY_CTRL_PLL_ON)
+
+#define ALX_MAC_STS 0x1410
+#define ALX_MAC_STS_TXQ_BUSY BIT(3)
+#define ALX_MAC_STS_RXQ_BUSY BIT(2)
+#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
+#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
+#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
+ ALX_MAC_STS_RXQ_BUSY | \
+ ALX_MAC_STS_TXMAC_BUSY | \
+ ALX_MAC_STS_RXMAC_BUSY)
+
+#define ALX_MDIO 0x1414
+#define ALX_MDIO_MODE_EXT BIT(30)
+#define ALX_MDIO_BUSY BIT(27)
+#define ALX_MDIO_CLK_SEL_MASK 0x7
+#define ALX_MDIO_CLK_SEL_SHIFT 24
+#define ALX_MDIO_CLK_SEL_25MD4 0
+#define ALX_MDIO_CLK_SEL_25MD128 7
+#define ALX_MDIO_START BIT(23)
+#define ALX_MDIO_SPRES_PRMBL BIT(22)
+/* bit21: 1:read,0:write */
+#define ALX_MDIO_OP_READ BIT(21)
+#define ALX_MDIO_REG_MASK 0x1F
+#define ALX_MDIO_REG_SHIFT 16
+#define ALX_MDIO_DATA_MASK 0xFFFF
+#define ALX_MDIO_DATA_SHIFT 0
+#define ALX_MDIO_MAX_AC_TO 120
+
+#define ALX_MDIO_EXTN 0x1448
+#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
+#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
+#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
+#define ALX_MDIO_EXTN_REG_SHIFT 0
+
+#define ALX_SERDES 0x1424
+#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
+#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
+
+#define ALX_LPI_CTRL 0x1440
+#define ALX_LPI_CTRL_EN BIT(0)
+
+/* for B0+, bit[13..] for C0+ */
+#define ALX_HRTBT_EXT_CTRL 0x1AD0
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
+#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
+#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
+#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
+#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
+#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
+#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
+
+#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
+#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
+#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
+#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
+#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
+#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
+
+/* 1B8C ~ 1B94 for C0+ */
+#define ALX_SWOI_ACER_CTRL 0x1B8C
+#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
+#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF
+#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
+
+#define ALX_SWOI_IOAC_CTRL_2 0x1B90
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
+
+#define ALX_SWOI_IOAC_CTRL_3 0x1B94
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
+
+/* for B0 */
+#define ALX_IDLE_DECISN_TIMER 0x1474
+/* 1ms */
+#define ALX_IDLE_DECISN_TIMER_DEF 0x400
+
+#define ALX_MAC_CTRL 0x1480
+#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
+#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
+/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
+#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
+#define ALX_MAC_CTRL_BRD_EN BIT(26)
+#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
+#define ALX_MAC_CTRL_SPEED_MASK 0x3
+#define ALX_MAC_CTRL_SPEED_SHIFT 20
+#define ALX_MAC_CTRL_SPEED_10_100 1
+#define ALX_MAC_CTRL_SPEED_1000 2
+#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
+#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
+#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
+#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
+#define ALX_MAC_CTRL_PCRCE BIT(7)
+#define ALX_MAC_CTRL_CRCE BIT(6)
+#define ALX_MAC_CTRL_FULLD BIT(5)
+#define ALX_MAC_CTRL_RXFC_EN BIT(3)
+#define ALX_MAC_CTRL_TXFC_EN BIT(2)
+#define ALX_MAC_CTRL_RX_EN BIT(1)
+#define ALX_MAC_CTRL_TX_EN BIT(0)
+
+#define ALX_STAD0 0x1488
+#define ALX_STAD1 0x148C
+
+#define ALX_HASH_TBL0 0x1490
+#define ALX_HASH_TBL1 0x1494
+
+#define ALX_MTU 0x149C
+#define ALX_MTU_JUMBO_TH 1514
+#define ALX_MTU_STD_ALGN 1536
+
+#define ALX_SRAM5 0x1524
+#define ALX_SRAM_RXF_LEN_MASK 0xFFF
+#define ALX_SRAM_RXF_LEN_SHIFT 0
+#define ALX_SRAM_RXF_LEN_8K (8*1024)
+
+#define ALX_SRAM9 0x1534
+#define ALX_SRAM_LOAD_PTR BIT(0)
+
+#define ALX_RX_BASE_ADDR_HI 0x1540
+
+#define ALX_TX_BASE_ADDR_HI 0x1544
+
+#define ALX_RFD_ADDR_LO 0x1550
+#define ALX_RFD_RING_SZ 0x1560
+#define ALX_RFD_BUF_SZ 0x1564
+
+#define ALX_RRD_ADDR_LO 0x1568
+#define ALX_RRD_RING_SZ 0x1578
+
+/* pri3: highest, pri0: lowest */
+#define ALX_TPD_PRI3_ADDR_LO 0x14E4
+#define ALX_TPD_PRI2_ADDR_LO 0x14E0
+#define ALX_TPD_PRI1_ADDR_LO 0x157C
+#define ALX_TPD_PRI0_ADDR_LO 0x1580
+
+/* producer index is 16bit */
+#define ALX_TPD_PRI3_PIDX 0x1618
+#define ALX_TPD_PRI2_PIDX 0x161A
+#define ALX_TPD_PRI1_PIDX 0x15F0
+#define ALX_TPD_PRI0_PIDX 0x15F2
+
+/* consumer index is 16bit */
+#define ALX_TPD_PRI3_CIDX 0x161C
+#define ALX_TPD_PRI2_CIDX 0x161E
+#define ALX_TPD_PRI1_CIDX 0x15F4
+#define ALX_TPD_PRI0_CIDX 0x15F6
+
+#define ALX_TPD_RING_SZ 0x1584
+
+#define ALX_TXQ0 0x1590
+#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
+#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
+#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
+#define ALX_TXQ0_LSO_8023_EN BIT(7)
+#define ALX_TXQ0_MODE_ENHANCE BIT(6)
+#define ALX_TXQ0_EN BIT(5)
+#define ALX_TXQ0_SUPT_IPOPT BIT(4)
+#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
+#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
+#define ALX_TXQ_TPD_BURSTPREF_DEF 5
+
+#define ALX_TXQ1 0x1594
+/* bit11: drop large packet, len > (rfd buf) */
+#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
+#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
+
+#define ALX_RXQ0 0x15A0
+#define ALX_RXQ0_EN BIT(31)
+#define ALX_RXQ0_RSS_HASH_EN BIT(29)
+#define ALX_RXQ0_RSS_MODE_MASK 0x3
+#define ALX_RXQ0_RSS_MODE_SHIFT 26
+#define ALX_RXQ0_RSS_MODE_DIS 0
+#define ALX_RXQ0_RSS_MODE_MQMI 3
+#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
+#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
+#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
+#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
+#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
+#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
+#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
+#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
+#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
+#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
+#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
+#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
+#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV4_EN)
+#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
+#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
+#define ALX_RXQ0_ASPM_THRESH_100M 3
+
+#define ALX_RXQ2 0x15A8
+#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
+#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
+#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
+#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
+/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
+ * rx-packet(1522) + delay-of-link(64)
+ * = 3212.
+ */
+#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
+
+#define ALX_DMA 0x15C0
+#define ALX_DMA_RCHNL_SEL_MASK 0x3
+#define ALX_DMA_RCHNL_SEL_SHIFT 26
+#define ALX_DMA_WDLY_CNT_MASK 0xF
+#define ALX_DMA_WDLY_CNT_SHIFT 16
+#define ALX_DMA_WDLY_CNT_DEF 4
+#define ALX_DMA_RDLY_CNT_MASK 0x1F
+#define ALX_DMA_RDLY_CNT_SHIFT 11
+#define ALX_DMA_RDLY_CNT_DEF 15
+/* bit10: 0:tpd with pri, 1: data */
+#define ALX_DMA_RREQ_PRI_DATA BIT(10)
+#define ALX_DMA_RREQ_BLEN_MASK 0x7
+#define ALX_DMA_RREQ_BLEN_SHIFT 4
+#define ALX_DMA_RORDER_MODE_MASK 0x7
+#define ALX_DMA_RORDER_MODE_SHIFT 0
+#define ALX_DMA_RORDER_MODE_OUT 4
+
+#define ALX_WOL0 0x14A0
+#define ALX_WOL0_PME_LINK BIT(5)
+#define ALX_WOL0_LINK_EN BIT(4)
+#define ALX_WOL0_PME_MAGIC_EN BIT(3)
+#define ALX_WOL0_MAGIC_EN BIT(2)
+
+#define ALX_RFD_PIDX 0x15E0
+
+#define ALX_RFD_CIDX 0x15F8
+
+/* MIB */
+#define ALX_MIB_BASE 0x1700
+#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
+#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
+
+#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
+#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
+#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
+#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
+
+#define ALX_ISR 0x1600
+#define ALX_ISR_DIS BIT(31)
+#define ALX_ISR_RX_Q7 BIT(30)
+#define ALX_ISR_RX_Q6 BIT(29)
+#define ALX_ISR_RX_Q5 BIT(28)
+#define ALX_ISR_RX_Q4 BIT(27)
+#define ALX_ISR_PCIE_LNKDOWN BIT(26)
+#define ALX_ISR_RX_Q3 BIT(19)
+#define ALX_ISR_RX_Q2 BIT(18)
+#define ALX_ISR_RX_Q1 BIT(17)
+#define ALX_ISR_RX_Q0 BIT(16)
+#define ALX_ISR_TX_Q0 BIT(15)
+#define ALX_ISR_PHY BIT(12)
+#define ALX_ISR_DMAW BIT(10)
+#define ALX_ISR_DMAR BIT(9)
+#define ALX_ISR_TXF_UR BIT(8)
+#define ALX_ISR_TX_Q3 BIT(7)
+#define ALX_ISR_TX_Q2 BIT(6)
+#define ALX_ISR_TX_Q1 BIT(5)
+#define ALX_ISR_RFD_UR BIT(4)
+#define ALX_ISR_RXF_OV BIT(3)
+#define ALX_ISR_MANU BIT(2)
+#define ALX_ISR_TIMER BIT(1)
+#define ALX_ISR_SMB BIT(0)
+
+#define ALX_IMR 0x1604
+
+/* re-send assert msg if SW does not respond */
+#define ALX_INT_RETRIG 0x1608
+/* 40ms */
+#define ALX_INT_RETRIG_TO 20000
+
+#define ALX_SMB_TIMER 0x15C4
+
+#define ALX_TINT_TPD_THRSHLD 0x15C8
+
+#define ALX_TINT_TIMER 0x15CC
+
+#define ALX_CLK_GATE 0x1814
+#define ALX_CLK_GATE_RXMAC BIT(5)
+#define ALX_CLK_GATE_TXMAC BIT(4)
+#define ALX_CLK_GATE_RXQ BIT(3)
+#define ALX_CLK_GATE_TXQ BIT(2)
+#define ALX_CLK_GATE_DMAR BIT(1)
+#define ALX_CLK_GATE_DMAW BIT(0)
+#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
+ ALX_CLK_GATE_TXMAC | \
+ ALX_CLK_GATE_RXQ | \
+ ALX_CLK_GATE_TXQ | \
+ ALX_CLK_GATE_DMAR | \
+ ALX_CLK_GATE_DMAW)
+
+/* interop between drivers */
+#define ALX_DRV 0x1804
+#define ALX_DRV_PHY_AUTO BIT(28)
+#define ALX_DRV_PHY_1000 BIT(27)
+#define ALX_DRV_PHY_100 BIT(26)
+#define ALX_DRV_PHY_10 BIT(25)
+#define ALX_DRV_PHY_DUPLEX BIT(24)
+/* bit23: adv Pause */
+#define ALX_DRV_PHY_PAUSE BIT(23)
+/* bit22: adv Asym Pause */
+#define ALX_DRV_PHY_MASK 0xFF
+#define ALX_DRV_PHY_SHIFT 21
+#define ALX_DRV_PHY_UNKNOWN 0
+
+/* flag indicating the PHY has been initialized */
+#define ALX_PHY_INITED 0x003F
+
+/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
+#define ALX_WOL_CTRL2 0x1830
+#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
+#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
+#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
+#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
+
+#define ALX_WOL_CTRL3 0x1834
+#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
+#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
+
+#define ALX_WOL_CTRL4 0x1838
+#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
+#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
+#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
+#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
+#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
+#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
+#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
+#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
+#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
+#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
+#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
+#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
+#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
+#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
+#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
+#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
+#define ALX_WOL_CTRL4_PT15_EN BIT(15)
+#define ALX_WOL_CTRL4_PT14_EN BIT(14)
+#define ALX_WOL_CTRL4_PT13_EN BIT(13)
+#define ALX_WOL_CTRL4_PT12_EN BIT(12)
+#define ALX_WOL_CTRL4_PT11_EN BIT(11)
+#define ALX_WOL_CTRL4_PT10_EN BIT(10)
+#define ALX_WOL_CTRL4_PT9_EN BIT(9)
+#define ALX_WOL_CTRL4_PT8_EN BIT(8)
+#define ALX_WOL_CTRL4_PT7_EN BIT(7)
+#define ALX_WOL_CTRL4_PT6_EN BIT(6)
+#define ALX_WOL_CTRL4_PT5_EN BIT(5)
+#define ALX_WOL_CTRL4_PT4_EN BIT(4)
+#define ALX_WOL_CTRL4_PT3_EN BIT(3)
+#define ALX_WOL_CTRL4_PT2_EN BIT(2)
+#define ALX_WOL_CTRL4_PT1_EN BIT(1)
+#define ALX_WOL_CTRL4_PT0_EN BIT(0)
+
+#define ALX_WOL_CTRL5 0x183C
+#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL6 0x1840
+#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL7 0x1844
+#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL8 0x1848
+#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
+
+#define ALX_ACER_FIXED_PTN0 0x1850
+#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
+#define ALX_ACER_FIXED_PTN0_SHIFT 0
+
+#define ALX_ACER_FIXED_PTN1 0x1854
+#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
+#define ALX_ACER_FIXED_PTN1_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM0 0x1858
+#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM0_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM1 0x185C
+#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM1_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM2 0x1860
+#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM2_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM3 0x1864
+#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM3_SHIFT 0
+
+#define ALX_ACER_MAGIC 0x1868
+#define ALX_ACER_MAGIC_EN BIT(31)
+#define ALX_ACER_MAGIC_PME_EN BIT(30)
+#define ALX_ACER_MAGIC_MATCH BIT(29)
+#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
+#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
+#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
+#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
+#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
+
+#define ALX_ACER_TIMER 0x186C
+#define ALX_ACER_TIMER_EN BIT(31)
+#define ALX_ACER_TIMER_PME_EN BIT(30)
+#define ALX_ACER_TIMER_MATCH BIT(29)
+#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
+#define ALX_ACER_TIMER_THRES_SHIFT 0
+#define ALX_ACER_TIMER_THRES_DEF 1
+
+/* RSS definitions */
+#define ALX_RSS_KEY0 0x14B0
+#define ALX_RSS_KEY1 0x14B4
+#define ALX_RSS_KEY2 0x14B8
+#define ALX_RSS_KEY3 0x14BC
+#define ALX_RSS_KEY4 0x14C0
+#define ALX_RSS_KEY5 0x14C4
+#define ALX_RSS_KEY6 0x14C8
+#define ALX_RSS_KEY7 0x14CC
+#define ALX_RSS_KEY8 0x14D0
+#define ALX_RSS_KEY9 0x14D4
+
+#define ALX_RSS_IDT_TBL0 0x1B00
+
+#define ALX_MSI_MAP_TBL1 0x15D0
+#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
+#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
+#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
+#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
+#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
+#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
+
+#define ALX_MSI_MAP_TBL2 0x15D8
+#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
+#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
+#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
+#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
+#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
+#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
+
+#define ALX_MSI_ID_MAP 0x15D4
+
+#define ALX_MSI_RETRANS_TIMER 0x1920
+/* bit16: 1:line,0:standard */
+#define ALX_MSI_MASK_SEL_LINE BIT(16)
+#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
+#define ALX_MSI_RETRANS_TM_SHIFT 0
+
+/* CR DMA ctrl */
+
+/* TX QoS */
+#define ALX_WRR 0x1938
+#define ALX_WRR_PRI_MASK 0x3
+#define ALX_WRR_PRI_SHIFT 29
+#define ALX_WRR_PRI_RESTRICT_NONE 3
+#define ALX_WRR_PRI3_MASK 0x1F
+#define ALX_WRR_PRI3_SHIFT 24
+#define ALX_WRR_PRI2_MASK 0x1F
+#define ALX_WRR_PRI2_SHIFT 16
+#define ALX_WRR_PRI1_MASK 0x1F
+#define ALX_WRR_PRI1_SHIFT 8
+#define ALX_WRR_PRI0_MASK 0x1F
+#define ALX_WRR_PRI0_SHIFT 0
+
+#define ALX_HQTPD 0x193C
+#define ALX_HQTPD_BURST_EN BIT(31)
+#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
+#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
+#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
+
+#define ALX_MISC 0x19C0
+#define ALX_MISC_PSW_OCP_MASK 0x7
+#define ALX_MISC_PSW_OCP_SHIFT 21
+#define ALX_MISC_PSW_OCP_DEF 0x7
+#define ALX_MISC_ISO_EN BIT(12)
+#define ALX_MISC_INTNLOSC_OPEN BIT(3)
+
+#define ALX_MSIC2 0x19C8
+#define ALX_MSIC2_CALB_START BIT(0)
+
+#define ALX_MISC3 0x19CC
+/* bit1: 1:Software control 25M */
+#define ALX_MISC3_25M_BY_SW BIT(1)
+/* bit0: 25M switch to intnl OSC */
+#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
+
+/* MSIX tbl in memory space */
+#define ALX_MSIX_ENTRY_BASE 0x2000
+
+/********************* PHY regs definition ***************************/
+
+/* PHY Specific Status Register */
+#define ALX_MII_GIGA_PSSR 0x11
+#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
+#define ALX_GIGA_PSSR_DPLX 0x2000
+#define ALX_GIGA_PSSR_SPEED 0xC000
+#define ALX_GIGA_PSSR_10MBS 0x0000
+#define ALX_GIGA_PSSR_100MBS 0x4000
+#define ALX_GIGA_PSSR_1000MBS 0x8000
+
+/* PHY Interrupt Enable Register */
+#define ALX_MII_IER 0x12
+#define ALX_IER_LINK_UP 0x0400
+#define ALX_IER_LINK_DOWN 0x0800
+
+/* PHY Interrupt Status Register */
+#define ALX_MII_ISR 0x13
+
+#define ALX_MII_DBG_ADDR 0x1D
+#define ALX_MII_DBG_DATA 0x1E
+
+/***************************** debug port *************************************/
+
+#define ALX_MIIDBG_ANACTRL 0x00
+#define ALX_ANACTRL_DEF 0x02EF
+
+#define ALX_MIIDBG_SYSMODCTRL 0x04
+/* en half bias */
+#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
+
+#define ALX_MIIDBG_SRDSYSMOD 0x05
+#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
+#define ALX_SRDSYSMOD_DEF 0x2C46
+
+#define ALX_MIIDBG_HIBNEG 0x0B
+#define ALX_HIBNEG_PSHIB_EN 0x8000
+#define ALX_HIBNEG_HIB_PSE 0x1000
+#define ALX_HIBNEG_DEF 0xBC40
+#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
+ ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
+
+#define ALX_MIIDBG_TST10BTCFG 0x12
+#define ALX_TST10BTCFG_DEF 0x4C04
+
+#define ALX_MIIDBG_AZ_ANADECT 0x15
+#define ALX_AZ_ANADECT_DEF 0x3220
+#define ALX_AZ_ANADECT_LONG 0x3210
+
+#define ALX_MIIDBG_MSE16DB 0x18
+#define ALX_MSE16DB_UP 0x05EA
+#define ALX_MSE16DB_DOWN 0x02EA
+
+#define ALX_MIIDBG_MSE20DB 0x1C
+#define ALX_MSE20DB_TH_MASK 0x7F
+#define ALX_MSE20DB_TH_SHIFT 2
+#define ALX_MSE20DB_TH_DEF 0x2E
+#define ALX_MSE20DB_TH_HI 0x54
+
+#define ALX_MIIDBG_AGC 0x23
+#define ALX_AGC_2_VGA_MASK 0x3FU
+#define ALX_AGC_2_VGA_SHIFT 8
+#define ALX_AGC_LONG1G_LIMT 40
+#define ALX_AGC_LONG100M_LIMT 44
+
+#define ALX_MIIDBG_LEGCYPS 0x29
+#define ALX_LEGCYPS_EN 0x8000
+#define ALX_LEGCYPS_DEF 0x129D
+
+#define ALX_MIIDBG_TST100BTCFG 0x36
+#define ALX_TST100BTCFG_DEF 0xE12C
+
+#define ALX_MIIDBG_GREENCFG 0x3B
+#define ALX_GREENCFG_DEF 0x7078
+
+#define ALX_MIIDBG_GREENCFG2 0x3D
+#define ALX_GREENCFG2_BP_GREEN 0x8000
+#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
+
+/******* dev 3 *********/
+#define ALX_MIIEXT_PCS 3
+
+#define ALX_MIIEXT_CLDCTRL3 0x8003
+#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
+
+#define ALX_MIIEXT_CLDCTRL5 0x8005
+#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
+
+#define ALX_MIIEXT_CLDCTRL6 0x8006
+#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
+#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
+#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
+#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
+
+#define ALX_MIIEXT_VDRVBIAS 0x8062
+#define ALX_VDRVBIAS_DEF 0x3
+
+/********* dev 7 **********/
+#define ALX_MIIEXT_ANEG 7
+
+#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
+#define ALX_LOCAL_EEEADV_1000BT 0x0004
+#define ALX_LOCAL_EEEADV_100BT 0x0002
+
+#define ALX_MIIEXT_AFE 0x801A
+#define ALX_AFE_10BT_100M_TH 0x0040
+
+#define ALX_MIIEXT_S3DIG10 0x8023
+/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
+#define ALX_MIIEXT_S3DIG10_SL 0x0001
+#define ALX_MIIEXT_S3DIG10_DEF 0
+
+#define ALX_MIIEXT_NLP78 0x8027
+#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
+
+#endif
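Nearly every multi-bit field in the header above is described by a _MASK/_SHIFT pair next to its register. The short sketch below shows the usual way such pairs are combined to build or decode a register word, reusing the ALX_MDIO field definitions from above; the helper functions and the composed command word are illustrative only and do not show how the driver itself is written.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)                 (1U << (n))

/* Field definitions copied from the header above */
#define ALX_MDIO_START         BIT(23)
#define ALX_MDIO_OP_READ       BIT(21)
#define ALX_MDIO_REG_MASK      0x1F
#define ALX_MDIO_REG_SHIFT     16

/* Illustrative helpers; the driver open-codes these shifts instead */
static uint32_t field_put(uint32_t val, uint32_t mask, int shift)
{
	return (val & mask) << shift;
}

static uint32_t field_get(uint32_t reg, uint32_t mask, int shift)
{
	return (reg >> shift) & mask;
}

int main(void)
{
	/* compose a read of PHY register 0x11 (ALX_MII_GIGA_PSSR) */
	uint32_t cmd = ALX_MDIO_START | ALX_MDIO_OP_READ |
		       field_put(0x11, ALX_MDIO_REG_MASK, ALX_MDIO_REG_SHIFT);

	printf("cmd=0x%08x reg=0x%02x\n", cmd,
	       field_get(cmd, ALX_MDIO_REG_MASK, ALX_MDIO_REG_SHIFT));
	return 0;
}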
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 297fc138fa1..986df04fdcb 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1790,6 +1790,9 @@ static int tg3_poll_fw(struct tg3 *tp)
int i;
u32 val;
+ if (tg3_flag(tp, NO_FWARE_REPORTED))
+ return 0;
+
if (tg3_flag(tp, IS_SSB_CORE)) {
/* We don't use firmware. */
return 0;
@@ -10475,6 +10478,13 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
*/
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
+ /* Chip may have been just powered on. If so, the boot code may still
+ * be running initialization. Wait for it to finish to avoid races in
+ * accessing the hardware.
+ */
+ tg3_enable_register_access(tp);
+ tg3_poll_fw(tp);
+
tg3_switch_clocks(tp);
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
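The tg3 hunk above makes hardware init wait for the boot firmware by calling tg3_poll_fw() first. The general shape of such a bounded poll (check a ready condition, sleep briefly, give up after a timeout) is sketched below in plain C; the retry count, delay and is_ready() callback are illustrative and are not tg3's actual values.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative bounded poll: retry up to max_tries times, sleeping
 * delay_us microseconds between attempts, and report -1 on timeout. */
static int poll_until_ready(bool (*is_ready)(void), int max_tries,
			    unsigned int delay_us)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (is_ready())
			return 0;
		usleep(delay_us);
	}
	return -1;	/* timed out; the caller decides whether this is fatal */
}

static bool fw_done(void)
{
	static int calls;

	return ++calls > 3;	/* pretend firmware finishes on the 4th check */
}

int main(void)
{
	printf("result=%d\n", poll_until_ready(fw_done, 10, 1000));
	return 0;
}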
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 6e8bc9d88c4..94d957d203a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
file->f_pos += offset;
break;
case 2:
- file->f_pos = debug->buffer_len - offset;
+ file->f_pos = debug->buffer_len + offset;
break;
default:
return -EINVAL;
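The bnad_debugfs change above corrects the SEEK_END case: under the usual llseek semantics the offset is added to the end-of-buffer position (and is normally zero or negative), not subtracted from it. A tiny standalone sketch of the three whence cases, with a hypothetical buffer length:

#include <stdio.h>

/* Illustrative only: compute the new file position the way a simple
 * debugfs lseek handler is expected to, for a buffer of buf_len bytes. */
static long new_pos(long pos, long offset, int whence, long buf_len)
{
	switch (whence) {
	case 0:	/* SEEK_SET */
		return offset;
	case 1:	/* SEEK_CUR */
		return pos + offset;
	case 2:	/* SEEK_END: offset is added, usually <= 0 */
		return buf_len + offset;
	default:
		return -1;
	}
}

int main(void)
{
	/* seeking to 16 bytes before the end of a 100-byte buffer */
	printf("%ld\n", new_pos(0, -16, 2, 100));	/* prints 84 */
	return 0;
}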
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 28a5e425fec..92306b32084 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -76,6 +76,12 @@ int tulip_refill_rx(struct net_device *dev)
mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&tp->pdev->dev, mapping)) {
+ dev_kfree_skb(skb);
+ tp->rx_buffers[entry].skb = NULL;
+ break;
+ }
+
tp->rx_buffers[entry].mapping = mapping;
tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 9aef457dacb..98efc29eaa5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4259,6 +4259,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
netdev->features |= NETIF_F_HIGHDMA;
} else {
status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (!status)
+ status = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
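Both the alx probe earlier in this series and this be2net fix follow the same DMA configuration pattern: try 64-bit streaming and coherent masks first, and fall back to 32-bit for both only if that fails. A condensed sketch of the pattern using the dma_set_mask()/dma_set_coherent_mask() calls of this kernel generation; the helper name and error code are placeholders and are not taken from either driver.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Illustrative helper: prefer 64-bit DMA for both streaming and coherent
 * allocations, otherwise fall back to a 32-bit mask for both. */
static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}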
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a2eadc07032..8cb600ccaf1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -722,8 +722,8 @@ static int sh_eth_check_reset(struct net_device *ndev)
mdelay(1);
cnt--;
}
- if (cnt < 0) {
- pr_err("Device reset fail\n");
+ if (cnt <= 0) {
+ pr_err("Device reset failed\n");
ret = -ETIMEDOUT;
}
return ret;
@@ -1260,16 +1260,23 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
-#if defined(CONFIG_ARCH_R8A7740)
- desc_status >>= 16;
-#endif
-
if (--boguscnt < 0)
break;
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
+#if defined(CONFIG_ARCH_R8A7740)
+ /*
+ * On almost all GETHER/ETHER controllers, the Receive Frame State
+ * (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0. On the
+ * R8A7740's GETHER, however, the RFS bits occupy bits 25 to 16,
+ * so the driver needs to shift right by 16.
+ */
+ desc_status >>= 16;
+#endif
+
if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
RD_RFS5 | RD_RFS6 | RD_RFS10)) {
ndev->stats.rx_errors++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 618446ae1ec..ee919ca8b8a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1899,7 +1899,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef STMMAC_XMIT_DEBUG
if (netif_msg_pktdata(priv)) {
- pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d"
+ pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
__func__, (priv->cur_tx % txsize),
(priv->dirty_tx % txsize), entry, first, nfrags);
if (priv->extend_desc)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a45f64eef87..101b037a7dc 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1681,7 +1681,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->rx_packet_max = max(rx_packet_max, 128);
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
priv->irq_enabled = true;
- if (!ndev) {
+ if (!priv->cpts) {
pr_err("error allocating cpts\n");
goto clean_ndev_ret;
}
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 12aec173564..c47f0dbcebb 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -449,10 +449,9 @@ static int davinci_mdio_suspend(struct device *dev)
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
- pm_runtime_put_sync(data->dev);
-
data->suspended = true;
spin_unlock(&data->lock);
+ pm_runtime_put_sync(data->dev);
return 0;
}
@@ -460,15 +459,12 @@ static int davinci_mdio_suspend(struct device *dev)
static int davinci_mdio_resume(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
- u32 ctrl;
- spin_lock(&data->lock);
pm_runtime_get_sync(data->dev);
+ spin_lock(&data->lock);
/* restart the scan state machine */
- ctrl = __raw_readl(&data->regs->control);
- ctrl |= CONTROL_ENABLE;
- __raw_writel(ctrl, &data->regs->control);
+ __davinci_mdio_reset(data);
data->suspended = false;
spin_unlock(&data->lock);
@@ -477,8 +473,8 @@ static int davinci_mdio_resume(struct device *dev)
}
static const struct dev_pm_ops davinci_mdio_pm_ops = {
- .suspend = davinci_mdio_suspend,
- .resume = davinci_mdio_resume,
+ .suspend_late = davinci_mdio_suspend,
+ .resume_early = davinci_mdio_resume,
};
static const struct of_device_id davinci_mdio_of_mtable[] = {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ab2307b5d9a..4dccead586b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci);
+ if (packet->vlan_tci & VLAN_TAG_PRESENT)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_tci);
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index edfddc5f61b..d811b06e2cc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -853,18 +853,24 @@ static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
- if (data && data[IFLA_MACVLAN_MODE])
- vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+
if (data && data[IFLA_MACVLAN_FLAGS]) {
__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
-
- if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
- dev_set_promiscuity(vlan->lowerdev, -1);
- else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
- dev_set_promiscuity(vlan->lowerdev, 1);
+ if (vlan->port->passthru && promisc) {
+ int err;
+
+ if (flags & MACVLAN_FLAG_NOPROMISC)
+ err = dev_set_promiscuity(vlan->lowerdev, -1);
+ else
+ err = dev_set_promiscuity(vlan->lowerdev, 1);
+ if (err < 0)
+ return err;
+ }
vlan->flags = flags;
}
+ if (data && data[IFLA_MACVLAN_MODE])
+ vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
return 0;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e46fef38685..bff7e0b0b4e 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1111,8 +1111,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
}
port->index = -1;
- team_port_enable(team, port);
list_add_tail_rcu(&port->list, &team->port_list);
+ team_port_enable(team, port);
__team_compute_features(team);
__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
__team_options_change_check(team);
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 5ca14d463ba..7f032e21134 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -28,6 +28,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
port_index = random_N(team->en_port_count);
port = team_get_port_by_index_rcu(team, port_index);
+ if (unlikely(!port))
+ goto drop;
port = team_get_first_port_txable_rcu(team, port);
if (unlikely(!port))
goto drop;
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 90311c7375f..53665850b59 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -33,6 +33,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
port_index = team_num_to_port_index(team,
rr_priv(team)->sent_packets++);
port = team_get_port_by_index_rcu(team, port_index);
+ if (unlikely(!port))
+ goto drop;
port = team_get_first_port_txable_rcu(team, port);
if (unlikely(!port))
goto drop;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a3442706546..cea2fe4e981 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
u32 numqueues = 0;
rcu_read_lock();
- numqueues = tun->numqueues;
+ numqueues = ACCESS_ONCE(tun->numqueues);
txq = skb_get_rxhash(skb);
if (txq) {
@@ -2157,6 +2157,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
INIT_LIST_HEAD(&tfile->next);
+ sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+
return 0;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 078795fe6e3..04ee044dde5 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -627,6 +627,12 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
+/* Huawei E1820 - handled by qmi_wwan */
+{
+ USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1),
+ .driver_info = 0,
+},
+
/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 86adfa0a912..d095d0d3056 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -519,6 +519,7 @@ static const struct usb_device_id products[] = {
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
+ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8111565c35f..f6dce13c8f8 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -603,18 +603,22 @@ skip:
/* Watch incoming packets to learn mapping between Ethernet address
* and Tunnel endpoint.
+ * Return true if packet is bogus and should be dropped.
*/
-static void vxlan_snoop(struct net_device *dev,
+static bool vxlan_snoop(struct net_device *dev,
__be32 src_ip, const u8 *src_mac)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
- int err;
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
if (likely(f->remote.remote_ip == src_ip))
- return;
+ return false;
+
+ /* Don't migrate static entries, drop packets */
+ if (f->state & NUD_NOARP)
+ return true;
if (net_ratelimit())
netdev_info(dev,
@@ -626,14 +630,19 @@ static void vxlan_snoop(struct net_device *dev,
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
- err = vxlan_fdb_create(vxlan, src_mac, src_ip,
- NUD_REACHABLE,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port,
- vxlan->default_dst.remote_vni,
- 0, NTF_SELF);
+
+ /* close off race between vxlan_flush and incoming packets */
+ if (netif_running(dev))
+ vxlan_fdb_create(vxlan, src_mac, src_ip,
+ NUD_REACHABLE,
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan->dst_port,
+ vxlan->default_dst.remote_vni,
+ 0, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
}
+
+ return false;
}
@@ -766,8 +775,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
vxlan->dev->dev_addr) == 0)
goto drop;
- if (vxlan->flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+ if ((vxlan->flags & VXLAN_F_LEARN) &&
+ vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
+ goto drop;
__skb_tunnel_rx(skb, vxlan->dev);
skb_reset_network_header(skb);
@@ -1190,9 +1200,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *skb1;
skb1 = skb_clone(skb, GFP_ATOMIC);
- rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
- if (rc == NETDEV_TX_OK)
- rc = rc1;
+ if (skb1) {
+ rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+ if (rc == NETDEV_TX_OK)
+ rc = rc1;
+ }
}
rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
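The reworked vxlan_snoop() above encodes a three-way decision: a known source at the same endpoint means nothing to do, a static (NUD_NOARP) entry must never be migrated so the packet is dropped, and anything else is either migrated or newly learned. A compact standalone sketch of just that decision; the fdb struct here is simplified and illustrative, only the NUD_NOARP value is borrowed from the kernel's neighbour flags.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUD_NOARP 0x40	/* borrowed from the kernel's neighbour state flags */

struct fdb_entry {
	uint32_t remote_ip;
	unsigned int state;
};

/* Returns true when the packet should be dropped (bogus source) and
 * false when it may be forwarded, mirroring the new vxlan_snoop() contract. */
static bool snoop(struct fdb_entry *f, uint32_t src_ip, bool dev_running)
{
	if (f) {
		if (f->remote_ip == src_ip)
			return false;		/* nothing changed */
		if (f->state & NUD_NOARP)
			return true;		/* never migrate static entries */
		f->remote_ip = src_ip;		/* migrate the learned endpoint */
		return false;
	}
	if (dev_running)
		printf("learning new entry for 0x%08x\n", src_ip);
	return false;
}

int main(void)
{
	struct fdb_entry st = { .remote_ip = 1, .state = NUD_NOARP };

	printf("drop=%d\n", snoop(&st, 2, true));	/* prints drop=1 */
	return 0;
}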
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 3b078515b42..760ab3fe09e 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -84,13 +84,17 @@ config ATH9K_DFS_CERTIFIED
developed. At this point enabling this option won't do anything
except increase code size.
-config ATH9K_RATE_CONTROL
+config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
- default y
+ default n
---help---
Say Y, if you want to use the ath9k specific rate control
- module instead of minstrel_ht.
+ module instead of minstrel_ht. Be warned that there are various
+ issues with the ath9k RC and minstrel is a more robust algorithm.
+ Note that even if this option is selected, "ath9k_rate_control"
+ has to be passed to mac80211 using the module parameter,
+ ieee80211_default_rc_algo.
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 2ad8f9474ba..75ee9e7704c 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,7 @@ ath9k-y += beacon.o \
antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
-ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
+ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index db5ffada221..7546b9a7dcb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -958,11 +958,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
{0x0000a074, 0x00000000},
{0x0000a078, 0x00000000},
{0x0000a07c, 0x00000000},
- {0x0000a080, 0x1a1a1a1a},
- {0x0000a084, 0x1a1a1a1a},
- {0x0000a088, 0x1a1a1a1a},
- {0x0000a08c, 0x1a1a1a1a},
- {0x0000a090, 0x171a1a1a},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
{0x0000a094, 0x11111717},
{0x0000a098, 0x00030311},
{0x0000a09c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index daba841808c..389ee1b5997 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -792,8 +792,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->iface_combinations = if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
- if (AR_SREV_5416(sc->sc_ah))
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
@@ -831,10 +830,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
sc->ant_rx = hw->wiphy->available_antennas_rx;
sc->ant_tx = hw->wiphy->available_antennas_tx;
-#ifdef CONFIG_ATH9K_RATE_CONTROL
- hw->rate_control_algorithm = "ath9k_rate_control";
-#endif
-
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&sc->sbands[IEEE80211_BAND_2GHZ];
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 267dbfcfaa9..b9a87383cb4 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -231,7 +231,7 @@ static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
}
#endif
-#ifdef CONFIG_ATH9K_RATE_CONTROL
+#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
int ath_rate_control_register(void);
void ath_rate_control_unregister(void);
#else
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6dd07e2ec59..a95b77ab360 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2458,7 +2458,7 @@ static void b43_request_firmware(struct work_struct *work)
for (i = 0; i < B43_NR_FWTYPES; i++) {
errmsg = ctx->errors[i];
if (strlen(errmsg))
- b43err(dev->wl, errmsg);
+ b43err(dev->wl, "%s", errmsg);
}
b43_print_fw_helptext(dev->wl, 1);
goto out;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index b98f2235978..2c593570497 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -930,6 +930,10 @@ fail:
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
+ if (drvr->iflist[0]) {
+ free_netdev(ifp->ndev);
+ drvr->iflist[0] = NULL;
+ }
if (p2p_ifp) {
free_netdev(p2p_ifp->ndev);
drvr->iflist[1] = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 28e7aeedd18..9fd6f2fef11 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
*/
static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
{
- /* disallow PS when one of the following global conditions meets */
- if (!wlc->pub->associated)
- return false;
-
- /* disallow PS when one of these meets when not scanning */
- if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
- return false;
-
- if (wlc->bsscfg->type == BRCMS_TYPE_AP)
- return false;
-
- if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
- return false;
-
- return true;
+ /* not supporting PS so always return false for now */
+ return false;
}
static void brcms_c_statsupd(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index c9f197d9ca1..fe31590a51b 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -816,6 +816,7 @@ out:
rs_sta->last_txrate_idx = idx;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
}
+ info->control.rates[0].count = 1;
D_RATE("leave: %d\n", idx);
}
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 1fc0b227e12..ed3c42a63a4 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
-
+ info->control.rates[0].count = 1;
}
static void *
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index f8246f2d88f..4caaf52986a 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1832,16 +1832,16 @@ u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
u32 beacon_interval);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops il_pm_ops;
#define IL_LEGACY_PM_OPS (&il_pm_ops)
-#else /* !CONFIG_PM */
+#else /* !CONFIG_PM_SLEEP */
#define IL_LEGACY_PM_OPS NULL
-#endif /* !CONFIG_PM */
+#endif /* !CONFIG_PM_SLEEP */
/*****************************************************
* Error Handling Debugging
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 94314a8e102..8fe76dc4f37 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
-
+ info->control.rates[0].count = 1;
}
static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 707446fa00b..cd1ad001918 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
- if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
+ if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
return;
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 4f886133639..2f690e578b5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
*/
if (load_module) {
err = request_module("%s", op->name);
+#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
if (err)
IWL_ERR(drv,
"failed to load module %s (error %d), is dynamic loading enabled?\n",
op->name, err);
+#endif
}
return;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index d6beec765d6..31587a318f8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -2652,6 +2652,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
+ info->control.rates[0].count = 1;
}
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index a830eb6cc41..b9ba4e71ea4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
} else if (ieee80211_is_back_req(fc)) {
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ tx_cmd->tx_flags |=
+ cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
}
/* HT rate doesn't make sense for a non data frame */
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 753b5682d53..a5f9875cfd6 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -26,10 +26,17 @@
static struct dentry *mwifiex_dfs_dir;
static char *bss_modes[] = {
- "Unknown",
- "Ad-hoc",
- "Managed",
- "Auto"
+ "UNSPECIFIED",
+ "ADHOC",
+ "STATION",
+ "AP",
+ "AP_VLAN",
+ "WDS",
+ "MONITOR",
+ "MESH_POINT",
+ "P2P_CLIENT",
+ "P2P_GO",
+ "P2P_DEVICE",
};
/* size/addr for mwifiex_debug_info */
@@ -200,7 +207,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
p += sprintf(p, "driver_version = %s", fmt);
p += sprintf(p, "\nverext = %s", priv->version_str);
p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
- p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
+
+ if (info.bss_mode >= ARRAY_SIZE(bss_modes))
+ p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
+ else
+ p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
+
p += sprintf(p, "media_state=\"%s\"\n",
(!priv->media_connected ? "Disconnected" : "Connected"));
p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index ead3a3e746a..3aa30ddcbfe 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
* TODO: we do not use +6 dBm option to do not increase power beyond
* regulatory limit, however this could be utilized for devices with
* CAPABILITY_POWER_LIMIT.
+ *
+ * TODO: add different temperature compensation code for RT3290 & RT5390
+ * to allow to use BBP_R1 for those chips.
*/
- rt2800_bbp_read(rt2x00dev, 1, &r1);
- if (delta <= -12) {
- power_ctrl = 2;
- delta += 12;
- } else if (delta <= -6) {
- power_ctrl = 1;
- delta += 6;
- } else {
- power_ctrl = 0;
+ if (!rt2x00_rt(rt2x00dev, RT3290) &&
+ !rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_bbp_read(rt2x00dev, 1, &r1);
+ if (delta <= -12) {
+ power_ctrl = 2;
+ delta += 12;
+ } else if (delta <= -6) {
+ power_ctrl = 1;
+ delta += 6;
+ } else {
+ power_ctrl = 0;
+ }
+ rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
+ rt2800_bbp_write(rt2x00dev, 1, r1);
}
- rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
- rt2800_bbp_write(rt2x00dev, 1, r1);
+
offset = TX_PWR_CFG_0;
for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 999ffc12578..c97e9d32733 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -764,6 +764,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
"can't alloc skb for rx\n");
goto done;
}
+ kmemleak_not_leak(new_skb);
pci_unmap_single(rtlpci->pdev,
*((dma_addr_t *) skb->cb),
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 3d0498e69c8..189ba124a8c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1973,26 +1973,35 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
-void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level)
+static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u32 ratr_value = (u32) mac->basic_rates;
- u8 *mcsrate = mac->mcs;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 ratr_value;
u8 ratr_index = 0;
u8 nmode = mac->ht_enable;
- u8 mimo_ps = 1;
- u16 shortgi_rate = 0;
- u32 tmp_ratr_value = 0;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u16 shortgi_rate;
+ u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = mac->sgi_40;
- u8 curshortgi_20mhz = mac->sgi_20;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
enum wireless_mode wirelessmode = mac->mode;
- ratr_value |= ((*(u16 *) (mcsrate))) << 12;
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->supp_rates[1] << 4;
+ else
+ ratr_value = sta->supp_rates[0];
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_value = 0xfff;
+
+ ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -2006,7 +2015,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
nmode = 1;
- if (mimo_ps == 0) {
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
ratr_value &= 0x0007F005;
} else {
u32 ratr_mask;
@@ -2016,8 +2025,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
ratr_mask = 0x000ff005;
else
ratr_mask = 0x0f0ff005;
- if (curtxbw_40mhz)
- ratr_mask |= 0x00000010;
+
ratr_value &= ratr_mask;
}
break;
@@ -2026,41 +2034,74 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
ratr_value &= 0x000ff0ff;
else
ratr_value &= 0x0f0ff0ff;
+
break;
}
+
ratr_value &= 0x0FFFFFFF;
- if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
- (!curtxbw_40mhz && curshortgi_20mhz))) {
+
+ if (nmode && ((curtxbw_40mhz &&
+ curshortgi_40mhz) || (!curtxbw_40mhz &&
+ curshortgi_20mhz))) {
+
ratr_value |= 0x10000000;
tmp_ratr_value = (ratr_value >> 12);
+
for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
if ((1 << shortgi_rate) & tmp_ratr_value)
break;
}
+
shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
- (shortgi_rate << 4) | (shortgi_rate);
+ (shortgi_rate << 4) | (shortgi_rate);
}
+
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
-void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u32 ratr_bitmap = (u32) mac->basic_rates;
- u8 *p_mcsrate = mac->mcs;
- u8 ratr_index = 0;
- u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = mac->sgi_40;
- u8 curshortgi_20mhz = mac->sgi_20;
- enum wireless_mode wirelessmode = mac->mode;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry = NULL;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curshortgi_40mhz = curtxbw_40mhz &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = 0;
bool shortgi = false;
u8 rate_mask[5];
u8 macid = 0;
- u8 mimops = 1;
-
- ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ wirelessmode = sta_entry->wireless_mode;
+ if (mac->opmode == NL80211_IFTYPE_STATION ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_bitmap = sta->supp_rates[1] << 4;
+ else
+ ratr_bitmap = sta->supp_rates[0];
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_bitmap = 0xfff;
+ ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
@@ -2071,6 +2112,7 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
break;
case WIRELESS_MODE_G:
ratr_index = RATR_INX_WIRELESS_GB;
+
if (rssi_level == 1)
ratr_bitmap &= 0x00000f00;
else if (rssi_level == 2)
@@ -2085,7 +2127,8 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
ratr_index = RATR_INX_WIRELESS_NGB;
- if (mimops == 0) {
+
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
if (rssi_level == 1)
ratr_bitmap &= 0x00070000;
else if (rssi_level == 2)
@@ -2128,8 +2171,10 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
}
}
}
+
if ((curtxbw_40mhz && curshortgi_40mhz) ||
(!curtxbw_40mhz && curshortgi_20mhz)) {
+
if (macid == 0)
shortgi = true;
else if (macid == 1)
@@ -2138,21 +2183,42 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
break;
default:
ratr_index = RATR_INX_WIRELESS_NGB;
+
if (rtlphy->rf_type == RF_1T2R)
ratr_bitmap &= 0x000ff0ff;
else
ratr_bitmap &= 0x0f0ff0ff;
break;
}
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n",
- ratr_bitmap);
- *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
- ratr_index << 28);
+ sta_entry->ratr_index = ratr_index;
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
+ *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+ (ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
"Rate_index:%x, ratr_val:%x, %5phC\n",
ratr_index, ratr_bitmap, rate_mask);
- rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
+ memcpy(rtlpriv->rate_mask, rate_mask, 5);
+ /* rtl92c_fill_h2c_cmd() does USB I/O and will result in a
+ * "scheduled while atomic" if called directly */
+ schedule_work(&rtlpriv->works.fill_h2c_cmd);
+
+ if (macid != 0)
+ sta_entry->ratr_index = ratr_index;
+}
+
+void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm.useramask)
+ rtl92cu_update_hal_rate_mask(hw, sta, rssi_level);
+ else
+ rtl92cu_update_hal_rate_table(hw, sta);
}
void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index f41a3aa4a26..8e3ec1e2564 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -98,10 +98,6 @@ void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level);
-void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 85b6bdb163c..da4f587199e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -289,14 +289,30 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
macaddr = cam_const_broad;
entry_id = key_index;
} else {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ entry_id = rtl_cam_get_free_entry(hw,
+ p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+ RT_TRACE(rtlpriv, COMP_SEC,
+ DBG_EMERG,
+ "Can not find free hw security cam entry\n");
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+
key_index = PAIRWISE_KEYIDX;
- entry_id = CAM_PAIRWISE_KEY_POSITION;
is_pairwise = true;
}
}
if (rtlpriv->sec.key_len[key_index] == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"delete one entry\n");
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 938b1e670b9..826f085c29d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -106,8 +106,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
.update_interrupt_mask = rtl92cu_update_interrupt_mask,
.get_hw_reg = rtl92cu_get_hw_reg,
.set_hw_reg = rtl92cu_set_hw_reg,
- .update_rate_tbl = rtl92cu_update_hal_rate_table,
- .update_rate_mask = rtl92cu_update_hal_rate_mask,
+ .update_rate_tbl = rtl92cu_update_hal_rate_tbl,
.fill_tx_desc = rtl92cu_tx_fill_desc,
.fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
.fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
@@ -137,6 +136,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
.phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
.phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
.dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
+ .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
};
static struct rtl_mod_params rtl92cu_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index a1310abd0d5..262e1e4c6e5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -49,5 +49,8 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr, u32 bitmask);
void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level);
#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 76732b0cd22..a3532e07787 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -824,6 +824,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
/* should after adapter start and interrupt enable. */
set_hal_stop(rtlhal);
+ cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
/* Enable software */
SET_USB_STOP(rtlusb);
rtl_usb_deinit(hw);
@@ -1026,6 +1027,16 @@ static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
return false;
}
+static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, fill_h2c_cmd);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask);
+}
+
static struct rtl_intf_ops rtl_usb_ops = {
.adapter_start = rtl_usb_start,
.adapter_stop = rtl_usb_stop,
@@ -1057,6 +1068,8 @@ int rtl_usb_probe(struct usb_interface *intf,
/* this spin lock must be initialized early */
spin_lock_init(&rtlpriv->locks.usb_lock);
+ INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
+ rtl_fill_h2c_cmd_work_callback);
rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 44328baa638..cc03e7c87cb 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1736,6 +1736,8 @@ struct rtl_hal_ops {
void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
bool mstate);
void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
+ void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
};
struct rtl_intf_ops {
@@ -1869,6 +1871,7 @@ struct rtl_works {
struct delayed_work fwevt_wq;
struct work_struct lps_change_work;
+ struct work_struct fill_h2c_cmd;
};
struct rtl_debug {
@@ -2048,6 +2051,7 @@ struct rtl_priv {
};
};
bool enter_ps; /* true when entering PS */
+ u8 rate_mask[5];
/*This must be the last item so
that it points to the data allocated
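
The rtl8192cu changes above stop calling rtl92c_fill_h2c_cmd() directly and instead stage the rate mask and schedule_work(), because the H2C command goes out over USB and may sleep while the rate-mask update can arrive in atomic context. A condensed sketch of that defer-to-workqueue pattern follows; the demo_* names and the demo_usb_send_cmd() helper are hypothetical stand-ins, not driver API.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct cmd_work;
	u8 pending_cmd[5];		/* plays the role of rtlpriv->rate_mask */
};

/* Hypothetical stand-in for the sleeping USB transfer (e.g. an H2C write). */
static void demo_usb_send_cmd(const u8 *cmd, size_t len)
{
}

static void demo_cmd_work(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, cmd_work);

	/* Process context: sleeping USB I/O is allowed here. */
	demo_usb_send_cmd(dev->pending_cmd, sizeof(dev->pending_cmd));
}

/* Callable from atomic context: stage the data, then queue the work. */
static void demo_queue_cmd(struct demo_dev *dev, const u8 *cmd)
{
	memcpy(dev->pending_cmd, cmd, sizeof(dev->pending_cmd));
	schedule_work(&dev->cmd_work);
}

static void demo_init(struct demo_dev *dev)
{
	INIT_WORK(&dev->cmd_work, demo_cmd_work);
}

static void demo_stop(struct demo_dev *dev)
{
	/* As in rtl_usb_stop(): no queued work may outlive the device. */
	cancel_work_sync(&dev->cmd_work);
}
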
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index affdb3ec622..4a0bbb13806 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -310,7 +310,7 @@ static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
memcpy(cmd->channels_2, cmd_channels->channels_2,
sizeof(cmd->channels_2));
memcpy(cmd->channels_5, cmd_channels->channels_5,
- sizeof(cmd->channels_2));
+ sizeof(cmd->channels_5));
/* channels_4 are not supported, so no need to copy them */
}
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 222d0354020..9e5484a7366 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -36,12 +36,12 @@
#define WL127X_IFTYPE_SR_VER 3
#define WL127X_MAJOR_SR_VER 10
#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
-#define WL127X_MINOR_SR_VER 115
+#define WL127X_MINOR_SR_VER 133
/* minimum multi-role FW version for wl127x */
#define WL127X_IFTYPE_MR_VER 5
#define WL127X_MAJOR_MR_VER 7
#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
-#define WL127X_MINOR_MR_VER 115
+#define WL127X_MINOR_MR_VER 42
/* FW chip version for wl128x */
#define WL128X_CHIP_VER 7
@@ -49,7 +49,7 @@
#define WL128X_IFTYPE_SR_VER 3
#define WL128X_MAJOR_SR_VER 10
#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
-#define WL128X_MINOR_SR_VER 115
+#define WL128X_MINOR_SR_VER 133
/* minimum multi-role FW version for wl128x */
#define WL128X_IFTYPE_MR_VER 5
#define WL128X_MAJOR_MR_VER 7
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index 09d944505ac..2b642f8c926 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -34,7 +34,7 @@ static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
memcpy(cmd->channels_2, cmd_channels->channels_2,
sizeof(cmd->channels_2));
memcpy(cmd->channels_5, cmd_channels->channels_5,
- sizeof(cmd->channels_2));
+ sizeof(cmd->channels_5));
/* channels_4 are not supported, so no need to copy them */
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 82576fffb45..a0b50ad2ef3 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -778,12 +778,13 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
- if (ret && list_empty(&vif->notify_list))
- list_add_tail(&vif->notify_list, &notify);
xenvif_notify_tx_completion(vif);
- xenvif_put(vif);
+ if (ret && list_empty(&vif->notify_list))
+ list_add_tail(&vif->notify_list, &notify);
+ else
+ xenvif_put(vif);
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
@@ -791,6 +792,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
notify_remote_via_irq(vif->rx_irq);
list_del_init(&vif->notify_list);
+ xenvif_put(vif);
}
/* More work to do? */
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f53b992f060..a6f584a7f4a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -192,14 +192,15 @@ EXPORT_SYMBOL(of_find_property);
struct device_node *of_find_all_nodes(struct device_node *prev)
{
struct device_node *np;
+ unsigned long flags;
- raw_spin_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np = prev ? prev->allnext : of_allnodes;
for (; np != NULL; np = np->allnext)
if (of_node_get(np))
break;
of_node_put(prev);
- raw_spin_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
@@ -421,8 +422,9 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
struct device_node *prev)
{
struct device_node *next;
+ unsigned long flags;
- raw_spin_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
next = prev ? prev->sibling : node->child;
for (; next; next = next->sibling) {
if (!__of_device_is_available(next))
@@ -431,7 +433,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
break;
}
of_node_put(prev);
- raw_spin_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
@@ -735,13 +737,14 @@ EXPORT_SYMBOL_GPL(of_modalias_node);
struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np;
+ unsigned long flags;
- raw_spin_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
for (np = of_allnodes; np; np = np->allnext)
if (np->phandle == handle)
break;
of_node_get(np);
- raw_spin_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index 791a6719d8a..8cd90e7e945 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -2357,27 +2357,48 @@ static const unsigned int sdhi3_wp_mux[] = {
};
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
- /* OVC */
- 150, 154,
+ /* PENC */
+ 154,
};
static const unsigned int usb0_mux[] = {
- USB_OVC0_MARK, USB_PENC0_MARK,
+ USB_PENC0_MARK,
+};
+static const unsigned int usb0_ovc_pins[] = {
+ /* USB_OVC */
+ 150
+};
+static const unsigned int usb0_ovc_mux[] = {
+ USB_OVC0_MARK,
};
/* - USB1 ------------------------------------------------------------------- */
static const unsigned int usb1_pins[] = {
- /* OVC */
- 152, 155,
+ /* PENC */
+ 155,
};
static const unsigned int usb1_mux[] = {
- USB_OVC1_MARK, USB_PENC1_MARK,
+ USB_PENC1_MARK,
+};
+static const unsigned int usb1_ovc_pins[] = {
+ /* USB_OVC */
+ 152,
+};
+static const unsigned int usb1_ovc_mux[] = {
+ USB_OVC1_MARK,
};
/* - USB2 ------------------------------------------------------------------- */
static const unsigned int usb2_pins[] = {
- /* OVC, PENC */
- 125, 156,
+ /* PENC */
+ 156,
};
static const unsigned int usb2_mux[] = {
- USB_OVC2_MARK, USB_PENC2_MARK,
+ USB_PENC2_MARK,
+};
+static const unsigned int usb2_ovc_pins[] = {
+ /* USB_OVC */
+ 125,
+};
+static const unsigned int usb2_ovc_mux[] = {
+ USB_OVC2_MARK,
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
@@ -2501,8 +2522,11 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi3_cd),
SH_PFC_PIN_GROUP(sdhi3_wp),
SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb0_ovc),
SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb1_ovc),
SH_PFC_PIN_GROUP(usb2),
+ SH_PFC_PIN_GROUP(usb2_ovc),
};
static const char * const du0_groups[] = {
@@ -2683,14 +2707,17 @@ static const char * const sdhi3_groups[] = {
static const char * const usb0_groups[] = {
"usb0",
+ "usb0_ovc",
};
static const char * const usb1_groups[] = {
"usb1",
+ "usb1_ovc",
};
static const char * const usb2_groups[] = {
"usb2",
+ "usb2_ovc",
};
static const struct sh_pfc_function pinmux_functions[] = {
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 8df0c5a21be..d111c8687f9 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -703,7 +703,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
}
rfkill_init_sw_state(gps_rfkill,
hp_wmi_get_sw_state(HPWMI_GPS));
- rfkill_set_hw_state(bluetooth_rfkill,
+ rfkill_set_hw_state(gps_rfkill,
hp_wmi_get_hw_state(HPWMI_GPS));
err = rfkill_register(gps_rfkill);
if (err)
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 0eab77b2234..f296f3f7db9 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -25,6 +25,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/ioctl.h>
#include <linux/completion.h>
#include <linux/io.h>
@@ -42,10 +43,65 @@
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
+struct at91_rtc_config {
+ bool use_shadow_imr;
+};
+
+static const struct at91_rtc_config *at91_rtc_config;
static DECLARE_COMPLETION(at91_rtc_updated);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
+static DEFINE_SPINLOCK(at91_rtc_lock);
+static u32 at91_rtc_shadow_imr;
+
+static void at91_rtc_write_ier(u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&at91_rtc_lock, flags);
+ at91_rtc_shadow_imr |= mask;
+ at91_rtc_write(AT91_RTC_IER, mask);
+ spin_unlock_irqrestore(&at91_rtc_lock, flags);
+}
+
+static void at91_rtc_write_idr(u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&at91_rtc_lock, flags);
+ at91_rtc_write(AT91_RTC_IDR, mask);
+ /*
+ * Register read back (of any RTC-register) needed to make sure
+ * IDR-register write has reached the peripheral before updating
+ * shadow mask.
+ *
+ * Note that there is still a possibility that the mask is updated
+ * before interrupts have actually been disabled in hardware. The only
+ * way to be certain would be to poll the IMR-register, which is
+ * the very register we are trying to emulate. The register read back
+ * is a reasonable heuristic.
+ */
+ at91_rtc_read(AT91_RTC_SR);
+ at91_rtc_shadow_imr &= ~mask;
+ spin_unlock_irqrestore(&at91_rtc_lock, flags);
+}
+
+static u32 at91_rtc_read_imr(void)
+{
+ unsigned long flags;
+ u32 mask;
+
+ if (at91_rtc_config->use_shadow_imr) {
+ spin_lock_irqsave(&at91_rtc_lock, flags);
+ mask = at91_rtc_shadow_imr;
+ spin_unlock_irqrestore(&at91_rtc_lock, flags);
+ } else {
+ mask = at91_rtc_read(AT91_RTC_IMR);
+ }
+
+ return mask;
+}
/*
* Decode time/date into rtc_time structure
@@ -110,9 +166,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
cr = at91_rtc_read(AT91_RTC_CR);
at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
+ at91_rtc_write_ier(AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+ at91_rtc_write_idr(AT91_RTC_ACKUPD);
at91_rtc_write(AT91_RTC_TIMR,
bin2bcd(tm->tm_sec) << 0
@@ -144,7 +200,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;
- alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+ alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
? 1 : 0;
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -169,7 +225,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm.tm_min = alrm->time.tm_min;
tm.tm_sec = alrm->time.tm_sec;
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write_idr(AT91_RTC_ALARM);
at91_rtc_write(AT91_RTC_TIMALR,
bin2bcd(tm.tm_sec) << 0
| bin2bcd(tm.tm_min) << 8
@@ -182,7 +238,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
if (alrm->enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write_ier(AT91_RTC_ALARM);
}
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -198,9 +254,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write_ier(AT91_RTC_ALARM);
} else
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write_idr(AT91_RTC_ALARM);
return 0;
}
@@ -209,7 +265,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
+ unsigned long imr = at91_rtc_read_imr();
seq_printf(seq, "update_IRQ\t: %s\n",
(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -229,7 +285,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
unsigned int rtsr;
unsigned long events = 0;
- rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
+ rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);
@@ -250,6 +306,43 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
return IRQ_NONE; /* not handled */
}
+static const struct at91_rtc_config at91rm9200_config = {
+};
+
+static const struct at91_rtc_config at91sam9x5_config = {
+ .use_shadow_imr = true,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id at91_rtc_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-rtc",
+ .data = &at91rm9200_config,
+ }, {
+ .compatible = "atmel,at91sam9x5-rtc",
+ .data = &at91sam9x5_config,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
+#endif
+
+static const struct at91_rtc_config *
+at91_rtc_get_config(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+
+ if (pdev->dev.of_node) {
+ match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
+ if (!match)
+ return NULL;
+ return (const struct at91_rtc_config *)match->data;
+ }
+
+ return &at91rm9200_config;
+}
+
static const struct rtc_class_ops at91_rtc_ops = {
.read_time = at91_rtc_readtime,
.set_time = at91_rtc_settime,
@@ -268,6 +361,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
struct resource *regs;
int ret = 0;
+ at91_rtc_config = at91_rtc_get_config(pdev);
+ if (!at91_rtc_config)
+ return -ENODEV;
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_err(&pdev->dev, "no mmio resource defined\n");
@@ -290,7 +387,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
/* Disable all interrupts */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
@@ -335,7 +432,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
/* Disable all interrupts */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
free_irq(irq, pdev);
@@ -358,13 +455,13 @@ static int at91_rtc_suspend(struct device *dev)
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
- at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
+ at91_rtc_imr = at91_rtc_read_imr()
& (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
enable_irq_wake(irq);
else
- at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
+ at91_rtc_write_idr(at91_rtc_imr);
}
return 0;
}
@@ -375,7 +472,7 @@ static int at91_rtc_resume(struct device *dev)
if (device_may_wakeup(dev))
disable_irq_wake(irq);
else
- at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
+ at91_rtc_write_ier(at91_rtc_imr);
}
return 0;
}
@@ -383,12 +480,6 @@ static int at91_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
-static const struct of_device_id at91_rtc_dt_ids[] = {
- { .compatible = "atmel,at91rm9200-rtc" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
-
static struct platform_driver at91_rtc_driver = {
.remove = __exit_p(at91_rtc_remove),
.driver = {
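
The at91sam9x5 variant cannot reliably read AT91_RTC_IMR, so the patch above emulates it: a software shadow is updated under a spinlock whenever IER/IDR are written, with a register read-back as the ordering heuristic described in the comment. A stripped-down sketch of the same write-only-mask emulation, using hypothetical demo_* register names rather than the AT91 ones, looks like this:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_IER	0x00	/* write-only: enable interrupt sources */
#define DEMO_IDR	0x04	/* write-only: disable interrupt sources */
#define DEMO_SR		0x08	/* any readable register, used to flush the write */

static void __iomem *demo_regs;
static DEFINE_SPINLOCK(demo_lock);
static u32 demo_shadow_imr;

static void demo_irq_enable(u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_shadow_imr |= mask;
	writel(mask, demo_regs + DEMO_IER);
	spin_unlock_irqrestore(&demo_lock, flags);
}

static void demo_irq_disable(u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	writel(mask, demo_regs + DEMO_IDR);
	/* Read back so the disable reaches the device before the shadow
	 * claims those sources are off; a heuristic, as the comment notes. */
	readl(demo_regs + DEMO_SR);
	demo_shadow_imr &= ~mask;
	spin_unlock_irqrestore(&demo_lock, flags);
}

static u32 demo_irq_mask(void)	/* the role at91_rtc_read_imr() plays */
{
	unsigned long flags;
	u32 mask;

	spin_lock_irqsave(&demo_lock, flags);
	mask = demo_shadow_imr;
	spin_unlock_irqrestore(&demo_lock, flags);

	return mask;
}
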
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index cc5bea9c4b1..f1cb706445c 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -854,6 +854,9 @@ static int cmos_resume(struct device *dev)
}
spin_lock_irq(&rtc_lock);
+ if (device_may_wakeup(dev))
+ hpet_rtc_timer_init();
+
do {
CMOS_WRITE(tmp, RTC_CONTROL);
hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
@@ -869,7 +872,6 @@ static int cmos_resume(struct device *dev)
rtc_update_irq(cmos->rtc, 1, mask);
tmp &= ~RTC_AIE;
hpet_mask_rtc_irq_bit(RTC_AIE);
- hpet_rtc_timer_init();
} while (mask & RTC_AIE);
spin_unlock_irq(&rtc_lock);
}
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 459c2ffc95a..426901cef14 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -273,6 +273,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
return ret;
}
+ device_init_wakeup(&pdev->dev, 1);
+
platform_set_drvdata(pdev, rtc);
rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev),
&tps6586x_rtc_ops, THIS_MODULE);
@@ -292,7 +294,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
goto fail_rtc_register;
}
disable_irq(rtc->irq);
- device_set_wakeup_capable(&pdev->dev, 1);
return 0;
fail_rtc_register:
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 8751a5240c9..b2eab34f38d 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rtc);
+ device_init_wakeup(&pdev->dev, 1);
return 0;
out2:
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 4ffa66c87ea..9ca3996f65b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2040,6 +2040,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
netiucv_setup_netdevice);
if (!dev)
return NULL;
+ rtnl_lock();
if (dev_alloc_name(dev, dev->name) < 0)
goto out_netdev;
@@ -2061,6 +2062,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
out_fsm:
kfree_fsm(privptr->fsm);
out_netdev:
+ rtnl_unlock();
free_netdev(dev);
return NULL;
}
@@ -2100,6 +2102,7 @@ static ssize_t conn_write(struct device_driver *drv,
rc = netiucv_register_device(dev);
if (rc) {
+ rtnl_unlock();
IUCV_DBF_TEXT_(setup, 2,
"ret %d from netiucv_register_device\n", rc);
goto out_free_ndev;
@@ -2109,7 +2112,8 @@ static ssize_t conn_write(struct device_driver *drv,
priv = netdev_priv(dev);
SET_NETDEV_DEV(dev, priv->dev);
- rc = register_netdev(dev);
+ rc = register_netdevice(dev);
+ rtnl_unlock();
if (rc)
goto out_unreg;
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 439c012be76..b63d534192e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
file->f_pos += offset;
break;
case 2:
- file->f_pos = debug->buffer_len - offset;
+ file->f_pos = debug->buffer_len + offset;
break;
default:
return -EINVAL;
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index adc1f7f471f..85e1ffd0e5c 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
pos = file->f_pos + offset;
break;
case 2:
- pos = fnic_dbg_prt->buffer_len - offset;
+ pos = fnic_dbg_prt->buffer_len + offset;
}
return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
-EINVAL : (file->f_pos = pos);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index f63f5ff7f27..f525ecb7a9c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
pos = file->f_pos + off;
break;
case 2:
- pos = debug->len - off;
+ pos = debug->len + off;
}
return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
}
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 60cfae51c71..eab593eaaaf 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -89,7 +89,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
if ((mask & hspi_read(hspi, SPSR)) == val)
return 0;
- msleep(20);
+ udelay(10);
}
dev_err(hspi->dev, "timeout\n");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 35f60bd252d..637d728fbeb 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1487,7 +1487,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
return 0;
err_spi_register_master:
- free_irq(board_dat->pdev->irq, board_dat);
+ free_irq(board_dat->pdev->irq, data);
err_request_irq:
pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
@@ -1667,6 +1667,7 @@ static int pch_spi_probe(struct pci_dev *pdev,
pd_dev = platform_device_alloc("pch-spi", i);
if (!pd_dev) {
dev_err(&pdev->dev, "platform_device_alloc failed\n");
+ retval = -ENOMEM;
goto err_platform_device;
}
pd_dev_save->pd_save[i] = pd_dev;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index e1d76960742..34d18dcfa0d 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -267,7 +267,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
u32 ipif_ier;
- u16 cr;
/* We get here with transmitter inhibited */
@@ -276,7 +275,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->remaining_bytes = t->len;
INIT_COMPLETION(xspi->done);
- xilinx_spi_fill_tx_fifo(xspi);
/* Enable the transmit empty interrupt, which we use to determine
* progress on the transmission.
@@ -285,12 +283,41 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
xspi->regs + XIPIF_V123B_IIER_OFFSET);
- /* Start the transfer by not inhibiting the transmitter any longer */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
- ~XSPI_CR_TRANS_INHIBIT;
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ for (;;) {
+ u16 cr;
+ u8 sr;
+
+ xilinx_spi_fill_tx_fifo(xspi);
+
+ /* Start the transfer by not inhibiting the transmitter any
+ * longer
+ */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
+ ~XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+ wait_for_completion(&xspi->done);
+
+ /* A transmit has just completed. Process received data and
+ * check for more data to transmit. Always inhibit the
+ * transmitter while the Isr refills the transmit register/FIFO,
+ * or make sure it is stopped if we're done.
+ */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
+
+ /* Read out all the data from the Rx FIFO */
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ xspi->rx_fn(xspi);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ }
- wait_for_completion(&xspi->done);
+ /* See if there is more data to send */
+ if (!xspi->remaining_bytes > 0)
+ break;
+ }
/* Disable the transmit empty interrupt */
xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
@@ -314,38 +341,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
- u16 cr;
- u8 sr;
-
- /* A transmit has just completed. Process received data and
- * check for more data to transmit. Always inhibit the
- * transmitter while the Isr refills the transmit register/FIFO,
- * or make sure it is stopped if we're done.
- */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
-
- /* Read out all the data from the Rx FIFO */
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
- xspi->rx_fn(xspi);
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- }
-
- /* See if there is more data to send */
- if (xspi->remaining_bytes > 0) {
- xilinx_spi_fill_tx_fifo(xspi);
- /* Start the transfer by not inhibiting the
- * transmitter any longer
- */
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
- } else {
- /* No more data to send.
- * Indicate the transfer is completed.
- */
- complete(&xspi->done);
- }
+ complete(&xspi->done);
}
return IRQ_HANDLED;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 49b098bedf9..475c9c11468 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -276,8 +276,9 @@ static void ci_role_work(struct work_struct *work)
ci_role_stop(ci);
ci_role_start(ci, role);
- enable_irq(ci->irq);
}
+
+ enable_irq(ci->irq);
}
static irqreturn_t ci_irq(int irq, void *data)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 519ead2443c..b501346484a 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1678,8 +1678,11 @@ static int udc_start(struct ci13xxx *ci)
ci->gadget.ep0 = &ci->ep0in->ep;
- if (ci->global_phy)
+ if (ci->global_phy) {
ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR(ci->transceiver))
+ ci->transceiver = NULL;
+ }
if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
if (ci->transceiver == NULL) {
@@ -1694,7 +1697,7 @@ static int udc_start(struct ci13xxx *ci)
goto put_transceiver;
}
- if (!IS_ERR_OR_NULL(ci->transceiver)) {
+ if (ci->transceiver) {
retval = otg_set_peripheral(ci->transceiver->otg,
&ci->gadget);
if (retval)
@@ -1711,7 +1714,7 @@ static int udc_start(struct ci13xxx *ci)
return retval;
remove_trans:
- if (!IS_ERR_OR_NULL(ci->transceiver)) {
+ if (ci->transceiver) {
otg_set_peripheral(ci->transceiver->otg, NULL);
if (ci->global_phy)
usb_put_phy(ci->transceiver);
@@ -1719,7 +1722,7 @@ remove_trans:
dev_err(dev, "error = %i\n", retval);
put_transceiver:
- if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy)
+ if (ci->transceiver && ci->global_phy)
usb_put_phy(ci->transceiver);
destroy_eps:
destroy_eps(ci);
@@ -1747,7 +1750,7 @@ static void udc_stop(struct ci13xxx *ci)
dma_pool_destroy(ci->td_pool);
dma_pool_destroy(ci->qh_pool);
- if (!IS_ERR_OR_NULL(ci->transceiver)) {
+ if (ci->transceiver) {
otg_set_peripheral(ci->transceiver->otg, NULL);
if (ci->global_phy)
usb_put_phy(ci->transceiver);
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 090b411d893..7d8dd5aad23 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -165,11 +165,12 @@ static void f81232_set_termios(struct tty_struct *tty,
/* FIXME - Stubbed out for now */
/* Don't change anything if nothing has changed */
- if (!tty_termios_hw_change(&tty->termios, old_termios))
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
/* Do the real work here... */
- tty_termios_copy_hw(&tty->termios, old_termios);
+ if (old_termios)
+ tty_termios_copy_hw(&tty->termios, old_termios);
}
static int f81232_tiocmget(struct tty_struct *tty)
@@ -187,12 +188,11 @@ static int f81232_tiocmset(struct tty_struct *tty,
static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct ktermios tmp_termios;
int result;
/* Setup termios */
if (tty)
- f81232_set_termios(tty, port, &tmp_termios);
+ f81232_set_termios(tty, port, NULL);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 7151659367a..048cd44d51b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -284,7 +284,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
serial settings even to the same values as before. Thus
we actually need to filter in this specific case */
- if (!tty_termios_hw_change(&tty->termios, old_termios))
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
cflag = tty->termios.c_cflag;
@@ -293,7 +293,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
if (!buf) {
dev_err(&port->dev, "%s - out of memory.\n", __func__);
/* Report back no change occurred */
- tty->termios = *old_termios;
+ if (old_termios)
+ tty->termios = *old_termios;
return;
}
@@ -433,7 +434,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
control = priv->line_control;
if ((cflag & CBAUD) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
- else if ((old_termios->c_cflag & CBAUD) == B0)
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
if (control != priv->line_control) {
control = priv->line_control;
@@ -492,7 +493,6 @@ static void pl2303_close(struct usb_serial_port *port)
static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct ktermios tmp_termios;
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result;
@@ -508,7 +508,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
/* Setup termios */
if (tty)
- pl2303_set_termios(tty, port, &tmp_termios);
+ pl2303_set_termios(tty, port, NULL);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index cf3df793c2b..ddf6c47137d 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -291,7 +291,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int cflag = tty->termios.c_cflag;
- unsigned int old_cflag = old_termios->c_cflag;
unsigned short uartdata;
unsigned char buf[2] = {0, 0};
int baud;
@@ -299,15 +298,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
u8 control;
/* check that they really want us to change something */
- if (!tty_termios_hw_change(&tty->termios, old_termios))
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
/* set DTR/RTS active */
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
- if ((old_cflag & CBAUD) == B0) {
+ if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
priv->line_control |= MCR_DTR;
- if (!(old_cflag & CRTSCTS))
+ if (!(old_termios->c_cflag & CRTSCTS))
priv->line_control |= MCR_RTS;
}
if (control != priv->line_control) {
@@ -394,7 +393,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct ktermios tmp_termios;
struct usb_serial *serial = port->serial;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
int ret;
@@ -411,7 +409,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
spcp8x5_set_ctrl_line(port, priv->line_control);
if (tty)
- spcp8x5_set_termios(tty, port, &tmp_termios);
+ spcp8x5_set_termios(tty, port, NULL);
port->port.drain_delay = 256;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index acb7121a931..6d78736563d 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1360,7 +1360,7 @@ static const struct file_operations vfio_device_fops = {
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
- if (MINOR(dev->devt) == 0)
+ if (mode && (MINOR(dev->devt) == 0))
*mode = S_IRUGO | S_IWUGO;
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2b51e2336aa..f80d3dd41d8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -155,14 +155,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
-
- bool zcopy;
int i;
- for (i = 0; i < n->dev.nvqs; ++i) {
- zcopy = vhost_net_zcopy_mask & (0x1 << i);
- if (zcopy)
- kfree(n->vqs[i].ubuf_info);
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ kfree(n->vqs[i].ubuf_info);
+ n->vqs[i].ubuf_info = NULL;
}
}
@@ -171,7 +168,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
bool zcopy;
int i;
- for (i = 0; i < n->dev.nvqs; ++i) {
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
zcopy = vhost_net_zcopy_mask & (0x1 << i);
if (!zcopy)
continue;
@@ -183,12 +180,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
return 0;
err:
- while (i--) {
- zcopy = vhost_net_zcopy_mask & (0x1 << i);
- if (!zcopy)
- continue;
- kfree(n->vqs[i].ubuf_info);
- }
+ vhost_net_clear_ubuf_info(n);
return -ENOMEM;
}
@@ -196,12 +188,12 @@ void vhost_net_vq_reset(struct vhost_net *n)
{
int i;
+ vhost_net_clear_ubuf_info(n);
+
for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
n->vqs[i].done_idx = 0;
n->vqs[i].upend_idx = 0;
n->vqs[i].ubufs = NULL;
- kfree(n->vqs[i].ubuf_info);
- n->vqs[i].ubuf_info = NULL;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
}
@@ -436,7 +428,8 @@ static void handle_tx(struct vhost_net *net)
kref_get(&ubufs->kref);
}
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
- }
+ } else
+ msg.msg_control = NULL;
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
@@ -1053,6 +1046,10 @@ static long vhost_net_set_owner(struct vhost_net *n)
int r;
mutex_lock(&n->dev.mutex);
+ if (vhost_dev_has_owner(&n->dev)) {
+ r = -EBUSY;
+ goto out;
+ }
r = vhost_net_set_ubuf_info(n);
if (r)
goto out;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index beee7f5787e..60aa5ad09a2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -344,13 +344,19 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
}
/* Caller should have device mutex */
+bool vhost_dev_has_owner(struct vhost_dev *dev)
+{
+ return dev->mm;
+}
+
+/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
struct task_struct *worker;
int err;
/* Is there an owner already? */
- if (dev->mm) {
+ if (vhost_dev_has_owner(dev)) {
err = -EBUSY;
goto err_mm;
}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a7ad6359298..64adcf99ff3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -133,6 +133,7 @@ struct vhost_dev {
long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
+bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_memory *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
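A stand-alone illustration of the cleanup pattern the vhost change above relies on (plain userspace C; the demo_* names are invented and nothing here is vhost code): freeing and NULLing the per-queue pointers in one helper makes that helper safe to call from the allocation error path, the reset path, and repeated teardown, which is exactly why vhost_net_clear_ubuf_info() can now be reused in all of them.

#include <stdlib.h>

#define NVQS 2

struct demo_vq  { void *ubuf_info; };
struct demo_net { struct demo_vq vqs[NVQS]; };

static void demo_clear_ubuf_info(struct demo_net *n)
{
        int i;

        for (i = 0; i < NVQS; ++i) {
                free(n->vqs[i].ubuf_info);
                n->vqs[i].ubuf_info = NULL;   /* idempotent: safe to call again */
        }
}

static int demo_set_ubuf_info(struct demo_net *n)
{
        int i;

        for (i = 0; i < NVQS; ++i) {
                n->vqs[i].ubuf_info = malloc(64);
                if (!n->vqs[i].ubuf_info)
                        goto err;
        }
        return 0;
err:
        demo_clear_ubuf_info(n);   /* frees whatever was allocated, leaves NULLs */
        return -1;
}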
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index cc072c66c76..0f0493c6337 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -379,10 +379,10 @@ static int xen_tmem_init(void)
#ifdef CONFIG_FRONTSWAP
if (tmem_enabled && frontswap) {
char *s = "";
- struct frontswap_ops *old_ops =
- frontswap_register_ops(&tmem_frontswap_ops);
+ struct frontswap_ops *old_ops;
tmem_frontswap_poolid = -1;
+ old_ops = frontswap_register_ops(&tmem_frontswap_ops);
if (IS_ERR(old_ops) || old_ops) {
if (IS_ERR(old_ops))
return PTR_ERR(old_ops);
diff --git a/fs/aio.c b/fs/aio.c
index 7fe5bdee163..2bbcacf74d0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,9 +141,6 @@ static void aio_free_ring(struct kioctx *ctx)
for (i = 0; i < ctx->nr_pages; i++)
put_page(ctx->ring_pages[i]);
- if (ctx->mmap_size)
- vm_munmap(ctx->mmap_base, ctx->mmap_size);
-
if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
kfree(ctx->ring_pages);
}
@@ -322,11 +319,6 @@ static void free_ioctx(struct kioctx *ctx)
aio_free_ring(ctx);
- spin_lock(&aio_nr_lock);
- BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
- aio_nr -= ctx->max_reqs;
- spin_unlock(&aio_nr_lock);
-
pr_debug("freeing %p\n", ctx);
/*
@@ -435,17 +427,24 @@ static void kill_ioctx(struct kioctx *ctx)
{
if (!atomic_xchg(&ctx->dead, 1)) {
hlist_del_rcu(&ctx->list);
- /* Between hlist_del_rcu() and dropping the initial ref */
- synchronize_rcu();
/*
- * We can't punt to workqueue here because put_ioctx() ->
- * free_ioctx() will unmap the ringbuffer, and that has to be
- * done in the original process's context. kill_ioctx_rcu/work()
- * exist for exit_aio(), as in that path free_ioctx() won't do
- * the unmap.
+ * It'd be more correct to do this in free_ioctx(), after all
+ * the outstanding kiocbs have finished - but by then io_destroy
+ * has already returned, so io_setup() could potentially return
+ * -EAGAIN with no ioctxs actually in use (as far as userspace
+ * could tell).
*/
- kill_ioctx_work(&ctx->rcu_work);
+ spin_lock(&aio_nr_lock);
+ BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
+ aio_nr -= ctx->max_reqs;
+ spin_unlock(&aio_nr_lock);
+
+ if (ctx->mmap_size)
+ vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+ /* Between hlist_del_rcu() and dropping the initial ref */
+ call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
}
}
@@ -495,10 +494,7 @@ void exit_aio(struct mm_struct *mm)
*/
ctx->mmap_size = 0;
- if (!atomic_xchg(&ctx->dead, 1)) {
- hlist_del_rcu(&ctx->list);
- call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
- }
+ kill_ioctx(ctx);
}
}
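A condensed sketch of the teardown ordering the aio change above moves to: mark the context dead exactly once, give back the aio_nr accounting and the ring mapping immediately (so io_setup() sees the slot free as soon as io_destroy() returns), and defer only the final free past a grace period with call_rcu() instead of blocking in synchronize_rcu(). Kernel RCU APIs are assumed; this is a compressed view of the hunk, not the complete fs/aio.c function.

static void demo_kill_ioctx(struct kioctx *ctx)
{
        if (!atomic_xchg(&ctx->dead, 1)) {       /* only the first caller tears down */
                hlist_del_rcu(&ctx->list);       /* no new lookups can find it */

                spin_lock(&aio_nr_lock);         /* give the slot back right away */
                aio_nr -= ctx->max_reqs;
                spin_unlock(&aio_nr_lock);

                if (ctx->mmap_size)              /* unmap in the caller's mm context */
                        vm_munmap(ctx->mmap_base, ctx->mmap_size);

                /* free the struct only after existing RCU readers are done */
                call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
        }
}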
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e7b3cb5286a..b8b60b660c8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2859,8 +2859,8 @@ fail_qgroup:
btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
- del_fs_roots(fs_info);
btrfs_cleanup_transaction(fs_info->tree_root);
+ del_fs_roots(fs_info);
fail_cleaner:
kthread_stop(fs_info->cleaner_kthread);
@@ -3512,15 +3512,15 @@ int close_ctree(struct btrfs_root *root)
percpu_counter_sum(&fs_info->delalloc_bytes));
}
- free_root_pointers(fs_info, 1);
-
btrfs_free_block_groups(fs_info);
+ btrfs_stop_all_workers(fs_info);
+
del_fs_roots(fs_info);
- iput(fs_info->btree_inode);
+ free_root_pointers(fs_info, 1);
- btrfs_stop_all_workers(fs_info);
+ iput(fs_info->btree_inode);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(root, CHECK_INTEGRITY))
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index af978f7682b..17f3064b4a3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8012,6 +8012,9 @@ int btrfs_drop_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ if (root == NULL)
+ return 1;
+
/* the snap/subvol tree is on deleting */
if (btrfs_root_refs(&root->root_item) == 0 &&
root != root->fs_info->tree_root)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 395b82031a4..4febca4fc2d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4082,7 +4082,7 @@ out:
return inode;
}
-static struct reloc_control *alloc_reloc_control(void)
+static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
{
struct reloc_control *rc;
@@ -4093,7 +4093,8 @@ static struct reloc_control *alloc_reloc_control(void)
INIT_LIST_HEAD(&rc->reloc_roots);
backref_cache_init(&rc->backref_cache);
mapping_tree_init(&rc->reloc_root_tree);
- extent_io_tree_init(&rc->processed_blocks, NULL);
+ extent_io_tree_init(&rc->processed_blocks,
+ fs_info->btree_inode->i_mapping);
return rc;
}
@@ -4110,7 +4111,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
int rw = 0;
int err = 0;
- rc = alloc_reloc_control();
+ rc = alloc_reloc_control(fs_info);
if (!rc)
return -ENOMEM;
@@ -4311,7 +4312,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
if (list_empty(&reloc_roots))
goto out;
- rc = alloc_reloc_control();
+ rc = alloc_reloc_control(root->fs_info);
if (!rc) {
err = -ENOMEM;
goto out;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 202dd3d68be..ebbf680378e 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
}
/**
- * Encode the flock and fcntl locks for the given inode into the pagelist.
- * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
- * sequential flock locks.
- * Must be called with lock_flocks() already held.
- * If we encounter more of a specific lock type than expected,
- * we return the value 1.
+ * Encode the flock and fcntl locks for the given inode into the ceph_filelock
+ * array. Must be called with lock_flocks() already held.
+ * If we encounter more of a specific lock type than expected, return -ENOSPC.
*/
-int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
- int num_fcntl_locks, int num_flock_locks)
+int ceph_encode_locks_to_buffer(struct inode *inode,
+ struct ceph_filelock *flocks,
+ int num_fcntl_locks, int num_flock_locks)
{
struct file_lock *lock;
- struct ceph_filelock cephlock;
int err = 0;
int seen_fcntl = 0;
int seen_flock = 0;
+ int l = 0;
dout("encoding %d flock and %d fcntl locks", num_flock_locks,
num_fcntl_locks);
- err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32));
- if (err)
- goto fail;
+
for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
if (lock->fl_flags & FL_POSIX) {
++seen_fcntl;
@@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
err = -ENOSPC;
goto fail;
}
- err = lock_to_ceph_filelock(lock, &cephlock);
+ err = lock_to_ceph_filelock(lock, &flocks[l]);
if (err)
goto fail;
- err = ceph_pagelist_append(pagelist, &cephlock,
- sizeof(struct ceph_filelock));
+ ++l;
}
- if (err)
- goto fail;
}
-
- err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
- if (err)
- goto fail;
for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
if (lock->fl_flags & FL_FLOCK) {
++seen_flock;
@@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
err = -ENOSPC;
goto fail;
}
- err = lock_to_ceph_filelock(lock, &cephlock);
+ err = lock_to_ceph_filelock(lock, &flocks[l]);
if (err)
goto fail;
- err = ceph_pagelist_append(pagelist, &cephlock,
- sizeof(struct ceph_filelock));
+ ++l;
}
- if (err)
- goto fail;
}
fail:
return err;
}
+/**
+ * Copy the encoded flock and fcntl locks into the pagelist.
+ * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
+ * sequential flock locks.
+ * Returns zero on success.
+ */
+int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
+ struct ceph_pagelist *pagelist,
+ int num_fcntl_locks, int num_flock_locks)
+{
+ int err = 0;
+ __le32 nlocks;
+
+ nlocks = cpu_to_le32(num_fcntl_locks);
+ err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
+ if (err)
+ goto out_fail;
+
+ err = ceph_pagelist_append(pagelist, flocks,
+ num_fcntl_locks * sizeof(*flocks));
+ if (err)
+ goto out_fail;
+
+ nlocks = cpu_to_le32(num_flock_locks);
+ err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
+ if (err)
+ goto out_fail;
+
+ err = ceph_pagelist_append(pagelist,
+ &flocks[num_fcntl_locks],
+ num_flock_locks * sizeof(*flocks));
+out_fail:
+ return err;
+}
+
/*
* Given a pointer to a lock, convert it to a ceph filelock
*/
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4f22671a5bd..4d2920304be 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2478,39 +2478,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
if (recon_state->flock) {
int num_fcntl_locks, num_flock_locks;
- struct ceph_pagelist_cursor trunc_point;
-
- ceph_pagelist_set_cursor(pagelist, &trunc_point);
- do {
- lock_flocks();
- ceph_count_locks(inode, &num_fcntl_locks,
- &num_flock_locks);
- rec.v2.flock_len = (2*sizeof(u32) +
- (num_fcntl_locks+num_flock_locks) *
- sizeof(struct ceph_filelock));
- unlock_flocks();
-
- /* pre-alloc pagelist */
- ceph_pagelist_truncate(pagelist, &trunc_point);
- err = ceph_pagelist_append(pagelist, &rec, reclen);
- if (!err)
- err = ceph_pagelist_reserve(pagelist,
- rec.v2.flock_len);
-
- /* encode locks */
- if (!err) {
- lock_flocks();
- err = ceph_encode_locks(inode,
- pagelist,
- num_fcntl_locks,
- num_flock_locks);
- unlock_flocks();
- }
- } while (err == -ENOSPC);
+ struct ceph_filelock *flocks;
+
+encode_again:
+ lock_flocks();
+ ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
+ unlock_flocks();
+ flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
+ sizeof(struct ceph_filelock), GFP_NOFS);
+ if (!flocks) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ lock_flocks();
+ err = ceph_encode_locks_to_buffer(inode, flocks,
+ num_fcntl_locks,
+ num_flock_locks);
+ unlock_flocks();
+ if (err) {
+ kfree(flocks);
+ if (err == -ENOSPC)
+ goto encode_again;
+ goto out_free;
+ }
+ /*
+ * number of encoded locks is stable, so copy to pagelist
+ */
+ rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
+ (num_fcntl_locks+num_flock_locks) *
+ sizeof(struct ceph_filelock));
+ err = ceph_pagelist_append(pagelist, &rec, reclen);
+ if (!err)
+ err = ceph_locks_to_pagelist(flocks, pagelist,
+ num_fcntl_locks,
+ num_flock_locks);
+ kfree(flocks);
} else {
err = ceph_pagelist_append(pagelist, &rec, reclen);
}
-
out_free:
kfree(path);
out_dput:
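The ceph change above switches from appending to the pagelist while holding lock_flocks() to a count/allocate/fill-and-retry scheme: sample the lock count under the lock, allocate the flat buffer with the lock dropped, fill it under the lock again, and retry if the count grew in between. A minimal stand-alone analogue of that scheme (plain userspace C with a pthread mutex; every demo_* name is invented):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_items[256];
static int demo_nitems;

/* Copy the current items into out; fails with -ENOSPC if cap is too small. */
static int demo_fill(int *out, int cap)
{
        if (demo_nitems > cap)
                return -ENOSPC;
        memcpy(out, demo_items, demo_nitems * sizeof(*out));
        return 0;
}

static int *demo_snapshot(int *out_n)
{
        int n, err;
        int *buf;

again:
        pthread_mutex_lock(&demo_lock);
        n = demo_nitems;                   /* count under the lock */
        pthread_mutex_unlock(&demo_lock);

        buf = malloc((n ? n : 1) * sizeof(*buf));
        if (!buf)
                return NULL;               /* allocate with the lock dropped */

        pthread_mutex_lock(&demo_lock);
        err = demo_fill(buf, n);           /* fill under the lock again */
        pthread_mutex_unlock(&demo_lock);

        if (err == -ENOSPC) {              /* items were added meanwhile: retry */
                free(buf);
                goto again;
        }
        *out_n = n;
        return buf;
}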
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 8696be2ff67..7ccfdb4aea2 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -822,8 +822,13 @@ extern const struct export_operations ceph_export_ops;
extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
-extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p,
- int p_locks, int f_locks);
+extern int ceph_encode_locks_to_buffer(struct inode *inode,
+ struct ceph_filelock *flocks,
+ int num_fcntl_locks,
+ int num_flock_locks);
+extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
+ struct ceph_pagelist *pagelist,
+ int num_fcntl_locks, int num_flock_locks);
extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
/* debugfs.c */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5b97e56ddbc..e3bc39bb9d1 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3279,8 +3279,8 @@ build_unc_path_to_root(const struct smb_vol *vol,
pos = full_path + unc_len;
if (pplen) {
- *pos++ = CIFS_DIR_SEP(cifs_sb);
- strncpy(pos, vol->prepath, pplen);
+ *pos = CIFS_DIR_SEP(cifs_sb);
+ strncpy(pos + 1, vol->prepath, pplen);
pos += pplen;
}
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 201f0a0d6b0..a7abbea2c09 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -295,6 +295,12 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
static int
ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
+ int rc;
+
+ rc = filemap_write_and_wait(file->f_mapping);
+ if (rc)
+ return rc;
+
return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
}
diff --git a/fs/file_table.c b/fs/file_table.c
index cd4d87a8295..485dc0eddd6 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -306,17 +306,18 @@ void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
+ unsigned long flags;
+
file_sb_list_del(file);
- if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
- unsigned long flags;
- spin_lock_irqsave(&delayed_fput_lock, flags);
- list_add(&file->f_u.fu_list, &delayed_fput_list);
- schedule_work(&delayed_fput_work);
- spin_unlock_irqrestore(&delayed_fput_lock, flags);
- return;
+ if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+ init_task_work(&file->f_u.fu_rcuhead, ____fput);
+ if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
+ return;
}
- init_task_work(&file->f_u.fu_rcuhead, ____fput);
- task_work_add(task, &file->f_u.fu_rcuhead, true);
+ spin_lock_irqsave(&delayed_fput_lock, flags);
+ list_add(&file->f_u.fu_list, &delayed_fput_list);
+ schedule_work(&delayed_fput_work);
+ spin_unlock_irqrestore(&delayed_fput_lock, flags);
}
}
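The fput() rework above inverts the old test: the common process-context case now tries task_work_add() first, and only interrupt context, kernel threads, or a failed task_work_add() (for example an exiting task) fall back to the shared delayed-fput workqueue. A sketch of just that deferral policy, assuming the kernel APIs shown in the hunk and omitting the surrounding bookkeeping:

static void demo_fput_deferral(struct file *file)
{
        struct task_struct *task = current;
        unsigned long flags;

        if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
                init_task_work(&file->f_u.fu_rcuhead, ____fput);
                if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
                        return;          /* runs before the task returns to userspace */
        }
        /* IRQ context, kthreads, or task_work_add() failure: punt to a worker */
        spin_lock_irqsave(&delayed_fput_lock, flags);
        list_add(&file->f_u.fu_list, &delayed_fput_list);
        schedule_work(&delayed_fput_work);
        spin_unlock_irqrestore(&delayed_fput_lock, flags);
}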
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 3027f4dbbab..e4ba5fe4c3b 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -109,10 +109,14 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
+ hpfs_lock(inode->i_sb);
+
if (to > inode->i_size) {
truncate_pagecache(inode, to, inode->i_size);
hpfs_truncate(inode);
}
+
+ hpfs_unlock(inode->i_sb);
}
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
diff --git a/fs/namei.c b/fs/namei.c
index 85e40d1c0a8..9ed9361223c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1976,7 +1976,7 @@ static int path_lookupat(int dfd, const char *name,
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY) {
- if (!nd->inode->i_op->lookup) {
+ if (!can_lookup(nd->inode)) {
path_put(&nd->path);
err = -ENOTDIR;
}
@@ -2850,7 +2850,7 @@ finish_lookup:
if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
goto out;
error = -ENOTDIR;
- if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
+ if ((nd->flags & LOOKUP_DIRECTORY) && !can_lookup(nd->inode))
goto out;
audit_inode(name, nd->path.dentry, 0);
finish_open:
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 81632609365..6792ce11f2b 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -1029,15 +1029,6 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
DPRINTK("ncp_rmdir: removing %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
- /*
- * fail with EBUSY if there are still references to this
- * directory.
- */
- dentry_unhash(dentry);
- error = -EBUSY;
- if (!d_unhashed(dentry))
- goto out;
-
len = sizeof(__name);
error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index b3fdd1a323d..e68588e6b1e 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1408,6 +1408,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
mres->lockname_len, mres->lockname);
ret = -EFAULT;
spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
goto leave;
}
res->state |= DLM_LOCK_RES_MIGRATING;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 04ee1b57c24..b4a5cdf9dbc 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -947,7 +947,7 @@ leave:
ocfs2_free_dir_lookup_result(&orphan_insert);
ocfs2_free_dir_lookup_result(&lookup);
- if (status)
+ if (status && (status != -ENOTEMPTY))
mlog_errno(status);
return status;
@@ -2216,7 +2216,7 @@ out:
brelse(orphan_dir_bh);
- return 0;
+ return ret;
}
int ocfs2_create_inode_in_orphan(struct inode *dir,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dd51e50001f..c3834dad09b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2118,6 +2118,7 @@ static int show_timer(struct seq_file *m, void *v)
nstr[notify & ~SIGEV_THREAD_ID],
(notify & SIGEV_THREAD_ID) ? "tid" : "pid",
pid_nr_ns(timer->it_pid, tp->ns));
+ seq_printf(m, "ClockID: %d\n", timer->it_clock);
return 0;
}
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index bd4b5a740ff..bdfabdaefdc 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -21,12 +21,12 @@ extern wait_queue_head_t log_wait;
static int kmsg_open(struct inode * inode, struct file * file)
{
- return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC);
}
static int kmsg_release(struct inode * inode, struct file * file)
{
- (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_PROC);
return 0;
}
@@ -34,15 +34,15 @@ static ssize_t kmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
if ((file->f_flags & O_NONBLOCK) &&
- !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
return -EAGAIN;
- return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC);
}
static unsigned int kmsg_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &log_wait, wait);
- if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
return POLLIN | POLLRDNORM;
return 0;
}
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index f9d7846097e..444a7704596 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -128,6 +128,7 @@ struct xfs_attr3_leaf_hdr {
__u8 holes;
__u8 pad1;
struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
+ __be32 pad2; /* 64 bit alignment */
};
#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 8804b8a3c31..0903960410a 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -2544,7 +2544,17 @@ xfs_btree_new_iroot(
if (error)
goto error0;
+ /*
+ * We can't just memcpy() the root in for CRC enabled btree blocks.
+ * In that case we also have to ensure the blkno remains correct
+ */
memcpy(cblock, block, xfs_btree_block_len(cur));
+ if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
+ else
+ cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
+ }
be16_add_cpu(&block->bb_level, 1);
xfs_btree_set_numrecs(block, 1);
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h
index 995f1f505a5..7826782b8d7 100644
--- a/fs/xfs/xfs_dir2_format.h
+++ b/fs/xfs/xfs_dir2_format.h
@@ -266,6 +266,7 @@ struct xfs_dir3_blk_hdr {
struct xfs_dir3_data_hdr {
struct xfs_dir3_blk_hdr hdr;
xfs_dir2_data_free_t best_free[XFS_DIR2_DATA_FD_COUNT];
+ __be32 pad; /* 64 bit alignment */
};
#define XFS_DIR3_DATA_CRC_OFF offsetof(struct xfs_dir3_data_hdr, hdr.crc)
@@ -477,7 +478,7 @@ struct xfs_dir3_leaf_hdr {
struct xfs_da3_blkinfo info; /* header for da routines */
__be16 count; /* count of entries */
__be16 stale; /* count of stale entries */
- __be32 pad;
+ __be32 pad; /* 64 bit alignment */
};
struct xfs_dir3_icleaf_hdr {
@@ -715,7 +716,7 @@ struct xfs_dir3_free_hdr {
__be32 firstdb; /* db of first entry */
__be32 nvalid; /* count of valid entries */
__be32 nused; /* count of used entries */
- __be32 pad; /* 64 bit alignment. */
+ __be32 pad; /* 64 bit alignment */
};
struct xfs_dir3_free {
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 45a85ff84da..7cf5e4eafe2 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1845,7 +1845,13 @@ xlog_recover_do_inode_buffer(
xfs_agino_t *buffer_nextp;
trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
- bp->b_ops = &xfs_inode_buf_ops;
+
+ /*
+ * Post recovery validation only works properly on CRC enabled
+ * filesystems.
+ */
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ bp->b_ops = &xfs_inode_buf_ops;
inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
for (i = 0; i < inodes_per_buf; i++) {
@@ -2205,7 +2211,16 @@ xlog_recover_do_reg_buffer(
/* Shouldn't be any more regions */
ASSERT(i == item->ri_total);
- xlog_recovery_validate_buf_type(mp, bp, buf_f);
+ /*
+ * We can only do post recovery validation on items on CRC enabled
+ * filesystems as we need to know when the buffer was written to be able
+ * to determine if we should have replayed the item. If we replay old
+ * metadata over a newer buffer, then it will enter a temporarily
+ * inconsistent state resulting in verification failures. Hence for now
+ * just avoid the verification stage for non-crc filesystems
+ */
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ xlog_recovery_validate_buf_type(mp, bp, buf_f);
}
/*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index f6bfbd73466..e8e310c0509 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -314,7 +314,8 @@ STATIC int
xfs_mount_validate_sb(
xfs_mount_t *mp,
xfs_sb_t *sbp,
- bool check_inprogress)
+ bool check_inprogress,
+ bool check_version)
{
/*
@@ -337,9 +338,10 @@ xfs_mount_validate_sb(
/*
* Version 5 superblock feature mask validation. Reject combinations the
- * kernel cannot support up front before checking anything else.
+ * kernel cannot support up front before checking anything else. For
+ * write validation, we don't need to check feature masks.
*/
- if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
+ if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
xfs_alert(mp,
"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
"Use of these features in this kernel is at your own risk!");
@@ -675,7 +677,8 @@ xfs_sb_to_disk(
static int
xfs_sb_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp,
+ bool check_version)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_sb sb;
@@ -686,7 +689,8 @@ xfs_sb_verify(
* Only check the in progress field for the primary superblock as
* mkfs.xfs doesn't clear it from secondary superblocks.
*/
- return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR);
+ return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+ check_version);
}
/*
@@ -719,7 +723,7 @@ xfs_sb_read_verify(
goto out_error;
}
}
- error = xfs_sb_verify(bp);
+ error = xfs_sb_verify(bp, true);
out_error:
if (error) {
@@ -758,7 +762,7 @@ xfs_sb_write_verify(
struct xfs_buf_log_item *bip = bp->b_fspriv;
int error;
- error = xfs_sb_verify(bp);
+ error = xfs_sb_verify(bp, false);
if (error) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, error);
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 9d96605f160..fa25becbdca 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -18,4 +18,9 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c6f6e0839b6..9f3c7e81270 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys;
extern void get_online_cpus(void);
extern void put_online_cpus(void);
+extern void cpu_hotplug_disable(void);
+extern void cpu_hotplug_enable(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
+#define cpu_hotplug_disable() do { } while (0)
+#define cpu_hotplug_enable() do { } while (0)
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 56a6b7fbb3c..a6ac84871d6 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -46,6 +46,7 @@ extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_detach_filter(struct sock *sk);
extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
+extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
#ifdef CONFIG_BPF_JIT
#include <stdarg.h>
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index b662045a81c..f6156f91eb1 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -260,12 +260,12 @@ team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
return port;
cur = port;
list_for_each_entry_continue_rcu(cur, &team->port_list, list)
- if (team_port_txable(port))
+ if (team_port_txable(cur))
return cur;
list_for_each_entry_rcu(cur, &team->port_list, list) {
if (cur == port)
break;
- if (team_port_txable(port))
+ if (team_port_txable(cur))
return cur;
}
return NULL;
diff --git a/include/linux/math64.h b/include/linux/math64.h
index b8ba8554472..2913b86eb12 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -6,7 +6,8 @@
#if BITS_PER_LONG == 64
-#define div64_long(x,y) div64_s64((x),(y))
+#define div64_long(x, y) div64_s64((x), (y))
+#define div64_ul(x, y) div64_u64((x), (y))
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
@@ -47,7 +48,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
#elif BITS_PER_LONG == 32
-#define div64_long(x,y) div_s64((x),(y))
+#define div64_long(x, y) div_s64((x), (y))
+#define div64_ul(x, y) div_u64((x), (y))
#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
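The new div64_ul() above gives callers one spelling for dividing a u64 by an unsigned long on both 32-bit and 64-bit kernels, instead of open-coding the BITS_PER_LONG split. A small hypothetical usage sketch (demo_avg_bytes is made up):

static inline u64 demo_avg_bytes(u64 total_bytes, unsigned long nr_samples)
{
        if (!nr_samples)
                return 0;
        /* maps to div64_u64() on 64-bit and div_u64() on 32-bit builds */
        return div64_ul(total_bytes, nr_samples);
}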
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8089e35d47a..f4b1001a467 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -461,6 +461,26 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
&(pos)->member)), typeof(*(pos)), member))
/**
+ * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ *
+ * This is the same as hlist_for_each_entry_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
+ for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
+
+/**
* hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4ccd68e49b0..ddcc7826d90 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -640,6 +640,15 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
+/*
+ * The tracing infrastructure traces RCU (we want that), but unfortunately
+ * some of the RCU checks cause tracing to lock up the system.
+ *
+ * The tracing version of rcu_dereference_raw() must not call
+ * rcu_read_lock_held().
+ */
+#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+
/**
* rcu_access_index() - fetch RCU index with no dereferencing
* @p: The index to read
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 5951e3f3887..26806775b11 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -111,6 +111,9 @@ static inline struct page *sg_page(struct scatterlist *sg)
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(!virt_addr_valid(buf));
+#endif
sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
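The CONFIG_DEBUG_SG check added above catches callers that hand sg_set_buf() an address with no backing struct page in the linear mapping (a vmalloc buffer, for instance), since sg_set_buf() goes through virt_to_page(). A hedged sketch of the class of bug it flags (kernel APIs assumed; demo_fill_sg is invented):

static int demo_fill_sg(struct scatterlist *sg)
{
        void *ok  = kmalloc(64, GFP_KERNEL);   /* linear mapping: virt_to_page() valid */
        void *bad = vmalloc(64);               /* not in the linear mapping */

        if (!ok || !bad) {
                kfree(ok);
                vfree(bad);
                return -ENOMEM;
        }

        sg_init_one(sg, ok, 64);               /* fine; keep 'ok' alive while the
                                                * sg entry is in use in real code */

        /* sg_set_buf(sg, bad, 64); */         /* with CONFIG_DEBUG_SG this now hits
                                                * BUG_ON(!virt_addr_valid(bad)) instead
                                                * of silently building a bad entry */

        vfree(bad);
        kfree(ok);
        return 0;
}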
diff --git a/include/linux/smp.h b/include/linux/smp.h
index e6564c1dc55..c8488763277 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/irqflags.h>
extern void cpu_idle(void);
@@ -139,13 +140,17 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
}
#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-#define on_each_cpu(func,info,wait) \
- ({ \
- local_irq_disable(); \
- func(info); \
- local_irq_enable(); \
- 0; \
- })
+
+static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ return 0;
+}
+
/*
* Note we still need to test the mask even for UP
* because we actually can get an empty mask from
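The UP stub above becomes a proper inline and, more importantly, uses local_irq_save()/local_irq_restore() rather than local_irq_disable()/local_irq_enable(), so a caller that already runs with interrupts off does not get them re-enabled behind its back. A small sketch of such a caller (demo_* names are made up; kernel APIs assumed):

static void demo_count(void *info)
{
        (*(int *)info)++;
}

static void demo_caller(void)
{
        int hits = 0;
        unsigned long flags;

        local_irq_save(flags);
        on_each_cpu(demo_count, &hits, 1);  /* must not re-enable interrupts here */
        local_irq_restore(flags);
}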
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 47ead515c81..c5fd30d2a41 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
+extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
#else
#define make_migration_entry(page, write) swp_entry(0, 0)
@@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp)
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
+static inline void migration_entry_wait_huge(struct mm_struct *mm,
+ pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
return 0;
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 38911391a13..98a3153c0f9 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -44,8 +44,8 @@
/* Return size of the log buffer */
#define SYSLOG_ACTION_SIZE_BUFFER 10
-#define SYSLOG_FROM_CALL 0
-#define SYSLOG_FROM_FILE 1
+#define SYSLOG_FROM_READER 0
+#define SYSLOG_FROM_PROC 1
int do_syslog(int type, char __user *buf, int count, bool from_file);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 2f322c38bd4..f8e084d0fc7 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -145,8 +145,8 @@ static inline void tracepoint_synchronize_unregister(void)
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond), \
- rcu_idle_exit(), \
- rcu_idle_enter()); \
+ rcu_irq_enter(), \
+ rcu_irq_exit()); \
}
#else
#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 35a57cd1704..7cb6d360d14 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1117,6 +1117,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(struct hci_dev *hdev);
int mgmt_index_removed(struct hci_dev *hdev);
+int mgmt_set_powered_failed(struct hci_dev *hdev, int err);
int mgmt_powered(struct hci_dev *hdev, u8 powered);
int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 22980a7c387..9944c3e68c5 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -42,6 +42,7 @@
#define MGMT_STATUS_NOT_POWERED 0x0f
#define MGMT_STATUS_CANCELLED 0x10
#define MGMT_STATUS_INVALID_INDEX 0x11
+#define MGMT_STATUS_RFKILLED 0x12
struct mgmt_hdr {
__le16 opcode;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 40b4dfce01f..1be442f8940 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -95,10 +95,10 @@ struct ip_tunnel_net {
int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
-int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
- struct rtnl_link_ops *ops, char *devname);
+int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+ struct rtnl_link_ops *ops, char *devname);
-void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn);
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index d4609029f01..385c6329a96 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -450,7 +450,8 @@ enum snd_soc_dapm_type {
snd_soc_dapm_aif_in, /* audio interface input */
snd_soc_dapm_aif_out, /* audio interface output */
snd_soc_dapm_siggen, /* signal generator */
- snd_soc_dapm_dai, /* link to DAI structure */
+ snd_soc_dapm_dai_in, /* link to DAI structure */
+ snd_soc_dapm_dai_out,
snd_soc_dapm_dai_link, /* link between two DAI structures */
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a5c86fc34a3..d88c8ee00c8 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -783,6 +783,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_IA64 0x3000000000000000ULL
#define KVM_REG_ARM 0x4000000000000000ULL
#define KVM_REG_S390 0x5000000000000000ULL
+#define KVM_REG_MIPS 0x7000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
diff --git a/init/Kconfig b/init/Kconfig
index 9d3a7887a6d..2d9b83104dc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -431,6 +431,7 @@ choice
config TREE_RCU
bool "Tree-based hierarchical RCU"
depends on !PREEMPT && SMP
+ select IRQ_WORK
help
This option selects the RCU implementation that is
designed for very large SMP system with hundreds or
diff --git a/kernel/audit.c b/kernel/audit.c
index 21c7fa615bd..91e53d04b6a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1056,7 +1056,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
static void wait_for_auditd(unsigned long sleep_time)
{
DECLARE_WAITQUEUE(wait, current);
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&audit_backlog_wait, &wait);
if (audit_backlog_limit &&
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index a291aa23fb3..43c307dc945 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -658,6 +658,7 @@ int audit_add_tree_rule(struct audit_krule *rule)
struct vfsmount *mnt;
int err;
+ rule->tree = NULL;
list_for_each_entry(tree, &tree_list, list) {
if (!strcmp(seed->pathname, tree->pathname)) {
put_tree(seed);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b5e4ab2d427..198a38883e6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -133,6 +133,27 @@ static void cpu_hotplug_done(void)
mutex_unlock(&cpu_hotplug.lock);
}
+/*
+ * Wait for currently running CPU hotplug operations to complete (if any) and
+ * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
+ * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
+ * hotplug path before performing hotplug operations. So acquiring that lock
+ * guarantees mutual exclusion from any currently running hotplug operations.
+ */
+void cpu_hotplug_disable(void)
+{
+ cpu_maps_update_begin();
+ cpu_hotplug_disabled = 1;
+ cpu_maps_update_done();
+}
+
+void cpu_hotplug_enable(void)
+{
+ cpu_maps_update_begin();
+ cpu_hotplug_disabled = 0;
+ cpu_maps_update_done();
+}
+
#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
@@ -541,36 +562,6 @@ static int __init alloc_frozen_cpus(void)
core_initcall(alloc_frozen_cpus);
/*
- * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
- * hotplug when tasks are about to be frozen. Also, don't allow the freezer
- * to continue until any currently running CPU hotplug operation gets
- * completed.
- * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
- * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
- * CPU hotplug path and released only after it is complete. Thus, we
- * (and hence the freezer) will block here until any currently running CPU
- * hotplug operation gets completed.
- */
-void cpu_hotplug_disable_before_freeze(void)
-{
- cpu_maps_update_begin();
- cpu_hotplug_disabled = 1;
- cpu_maps_update_done();
-}
-
-
-/*
- * When tasks have been thawed, re-enable regular CPU hotplug (which had been
- * disabled while beginning to freeze tasks).
- */
-void cpu_hotplug_enable_after_thaw(void)
-{
- cpu_maps_update_begin();
- cpu_hotplug_disabled = 0;
- cpu_maps_update_done();
-}
-
-/*
* When callbacks for CPU hotplug notifications are being executed, we must
* ensure that the state of the system with respect to the tasks being frozen
* or not, as reported by the notification, remains unchanged *throughout the
@@ -589,12 +580,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
- cpu_hotplug_disable_before_freeze();
+ cpu_hotplug_disable();
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
- cpu_hotplug_enable_after_thaw();
+ cpu_hotplug_enable();
break;
default:
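cpu_hotplug_disable()/cpu_hotplug_enable() above generalize the old freezer-only helpers; the reboot path in kernel/sys.c further down becomes the second user. As a hedged usage sketch (demo_run_on_stable_cpus is invented; kernel APIs assumed), a long-running operation that must not race with sysfs-initiated hotplug could bracket itself the same way:

static void demo_run_on_stable_cpus(smp_call_func_t fn, void *arg)
{
        int cpu;

        /* waits for any in-flight hotplug operation, then blocks new requests */
        cpu_hotplug_disable();

        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, fn, arg, 1);

        cpu_hotplug_enable();
}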
diff --git a/kernel/exit.c b/kernel/exit.c
index af2eb3cbd49..7bb73f9d09d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -649,7 +649,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
* jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
forget_original_parent(tsk);
- exit_task_namespaces(tsk);
write_lock_irq(&tasklist_lock);
if (group_dead)
@@ -795,6 +794,7 @@ void do_exit(long code)
exit_shm(tsk);
exit_files(tsk);
exit_fs(tsk);
+ exit_task_namespaces(tsk);
exit_task_work(tsk);
check_stack_usage();
exit_thread();
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5a83dde8ca0..54a4d522323 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -143,7 +143,10 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
* irq_domain_add_simple() - Allocate and register a simple irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: total number of irqs in mapping
- * @first_irq: first number of irq block assigned to the domain
+ * @first_irq: first number of irq block assigned to the domain,
+ * pass zero to assign irqs on-the-fly. This will result in a
+ * linear IRQ domain so it is important to use irq_create_mapping()
+ * for each used IRQ, especially when SPARSE_IRQ is enabled.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*
@@ -191,6 +194,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
/* A linear domain is the default */
return irq_domain_add_linear(of_node, size, ops, host_data);
}
+EXPORT_SYMBOL_GPL(irq_domain_add_simple);
/**
* irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
@@ -397,11 +401,12 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
while (count--) {
int irq = irq_base + count;
struct irq_data *irq_data = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irq_data->hwirq;
+ irq_hw_number_t hwirq;
if (WARN_ON(!irq_data || irq_data->domain != domain))
continue;
+ hwirq = irq_data->hwirq;
irq_set_status_flags(irq, IRQ_NOREQUEST);
/* remove chip and handler */
diff --git a/kernel/printk.c b/kernel/printk.c
index fa36e149442..8212c1aef12 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -363,6 +363,53 @@ static void log_store(int facility, int level,
log_next_seq++;
}
+#ifdef CONFIG_SECURITY_DMESG_RESTRICT
+int dmesg_restrict = 1;
+#else
+int dmesg_restrict;
+#endif
+
+static int syslog_action_restricted(int type)
+{
+ if (dmesg_restrict)
+ return 1;
+ /*
+ * Unless restricted, we allow "read all" and "get buffer size"
+ * for everybody.
+ */
+ return type != SYSLOG_ACTION_READ_ALL &&
+ type != SYSLOG_ACTION_SIZE_BUFFER;
+}
+
+static int check_syslog_permissions(int type, bool from_file)
+{
+ /*
+ * If this is from /proc/kmsg and we've already opened it, then we've
+ * already done the capabilities checks at open time.
+ */
+ if (from_file && type != SYSLOG_ACTION_OPEN)
+ return 0;
+
+ if (syslog_action_restricted(type)) {
+ if (capable(CAP_SYSLOG))
+ return 0;
+ /*
+ * For historical reasons, accept CAP_SYS_ADMIN too, with
+ * a warning.
+ */
+ if (capable(CAP_SYS_ADMIN)) {
+ pr_warn_once("%s (%d): Attempt to access syslog with "
+ "CAP_SYS_ADMIN but no CAP_SYSLOG "
+ "(deprecated).\n",
+ current->comm, task_pid_nr(current));
+ return 0;
+ }
+ return -EPERM;
+ }
+ return security_syslog(type);
+}
+
+
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
u64 seq;
@@ -620,7 +667,8 @@ static int devkmsg_open(struct inode *inode, struct file *file)
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
return 0;
- err = security_syslog(SYSLOG_ACTION_READ_ALL);
+ err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
+ SYSLOG_FROM_READER);
if (err)
return err;
@@ -813,45 +861,6 @@ static inline void boot_delay_msec(int level)
}
#endif
-#ifdef CONFIG_SECURITY_DMESG_RESTRICT
-int dmesg_restrict = 1;
-#else
-int dmesg_restrict;
-#endif
-
-static int syslog_action_restricted(int type)
-{
- if (dmesg_restrict)
- return 1;
- /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
- return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
-}
-
-static int check_syslog_permissions(int type, bool from_file)
-{
- /*
- * If this is from /proc/kmsg and we've already opened it, then we've
- * already done the capabilities checks at open time.
- */
- if (from_file && type != SYSLOG_ACTION_OPEN)
- return 0;
-
- if (syslog_action_restricted(type)) {
- if (capable(CAP_SYSLOG))
- return 0;
- /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
- if (capable(CAP_SYS_ADMIN)) {
- printk_once(KERN_WARNING "%s (%d): "
- "Attempt to access syslog with CAP_SYS_ADMIN "
- "but no CAP_SYSLOG (deprecated).\n",
- current->comm, task_pid_nr(current));
- return 0;
- }
- return -EPERM;
- }
- return 0;
-}
-
#if defined(CONFIG_PRINTK_TIME)
static bool printk_time = 1;
#else
@@ -1249,7 +1258,7 @@ out:
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
- return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
+ return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
/*
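The block moved above lets devkmsg_open() reuse check_syslog_permissions() instead of calling security_syslog() directly, and the SYSLOG_FROM_READER/SYSLOG_FROM_PROC rename spells out the caller's intent: /proc/kmsg is checked once at open time, while the syslog(2) path is checked on every call. A sketch of an open handler following the same convention (demo_kmsg_open is invented; kernel APIs assumed):

static int demo_kmsg_open(struct inode *inode, struct file *file)
{
        int err;

        /* write-only openers never read the log, nothing to check */
        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                return 0;

        /* check once here; later reads can pass SYSLOG_FROM_PROC and skip it */
        err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
                                       SYSLOG_FROM_READER);
        if (err)
                return err;

        return 0;
}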
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 16ea6792501..35380019f0f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1451,9 +1451,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
rnp->grphi, rnp->qsmask);
raw_spin_unlock_irq(&rnp->lock);
#ifdef CONFIG_PROVE_RCU_DELAY
- if ((prandom_u32() % (rcu_num_nodes * 8)) == 0 &&
+ if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
system_state == SYSTEM_RUNNING)
- schedule_timeout_uninterruptible(2);
+ udelay(200);
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
cond_resched();
}
@@ -1613,6 +1613,14 @@ static int __noreturn rcu_gp_kthread(void *arg)
}
}
+static void rsp_wakeup(struct irq_work *work)
+{
+ struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
+
+ /* Wake up rcu_gp_kthread() to start the grace period. */
+ wake_up(&rsp->gp_wq);
+}
+
/*
* Start a new RCU grace period if warranted, re-initializing the hierarchy
* in preparation for detecting the next grace period. The caller must hold
@@ -1637,8 +1645,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
}
rsp->gp_flags = RCU_GP_FLAG_INIT;
- /* Wake up rcu_gp_kthread() to start the grace period. */
- wake_up(&rsp->gp_wq);
+ /*
+ * We can't do wakeups while holding the rnp->lock, as that
+ * could cause possible deadlocks with the rq->lock. Defer
+ * the wakeup to interrupt context.
+ */
+ irq_work_queue(&rsp->wakeup_work);
}
/*
@@ -3235,6 +3247,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
rsp->rda = rda;
init_waitqueue_head(&rsp->gp_wq);
+ init_irq_work(&rsp->wakeup_work, rsp_wakeup);
rnp = rsp->level[rcu_num_lvls - 1];
for_each_possible_cpu(i) {
while (i > rnp->grphi)
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index da77a8f57ff..4df503470e4 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -27,6 +27,7 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
+#include <linux/irq_work.h>
/*
* Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -442,6 +443,7 @@ struct rcu_state {
char *name; /* Name of structure. */
char abbr; /* Abbreviated name. */
struct list_head flavors; /* List of RCU flavors. */
+ struct irq_work wakeup_work; /* Postponed wakeups */
};
/* Values for rcu_state structure's gp_flags field. */
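rcu_start_gp_advanced() above runs with rnp->lock held, and wake_up() would take rq->lock, so the wakeup is queued as irq_work and issued from hard-interrupt context instead. A minimal sketch of that pattern outside RCU (demo_* names are made up; the kernel irq_work and waitqueue APIs are assumed):

struct demo_state {
        wait_queue_head_t gp_wq;
        struct irq_work   wakeup_work;
};

static void demo_wakeup(struct irq_work *work)
{
        struct demo_state *s = container_of(work, struct demo_state, wakeup_work);

        wake_up(&s->gp_wq);              /* safe: no conflicting locks held here */
}

static void demo_init(struct demo_state *s)
{
        init_waitqueue_head(&s->gp_wq);
        init_irq_work(&s->wakeup_work, demo_wakeup);
}

static void demo_kick(struct demo_state *s)
{
        /* called with a lock held that forbids calling wake_up() directly */
        irq_work_queue(&s->wakeup_work);
}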
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b5197dcb0da..3d6833f125d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -195,8 +195,12 @@ void local_bh_enable_ip(unsigned long ip)
EXPORT_SYMBOL(local_bh_enable_ip);
/*
- * We restart softirq processing for at most 2 ms,
- * and if need_resched() is not set.
+ * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
+ * but break the loop if need_resched() is set or after 2 ms.
+ * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
+ * certain cases, such as stop_machine(), jiffies may cease to
+ * increment and so we need the MAX_SOFTIRQ_RESTART limit as
+ * well to make sure we eventually return from this method.
*
* These limits have been established via experimentation.
* The two things to balance is latency against fairness -
@@ -204,6 +208,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
* should not be able to lock up the box.
*/
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
+#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
@@ -212,6 +217,7 @@ asmlinkage void __do_softirq(void)
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
int cpu;
unsigned long old_flags = current->flags;
+ int max_restart = MAX_SOFTIRQ_RESTART;
/*
* Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -265,7 +271,8 @@ restart:
pending = local_softirq_pending();
if (pending) {
- if (time_before(jiffies, end) && !need_resched())
+ if (time_before(jiffies, end) && !need_resched() &&
+ --max_restart)
goto restart;
wakeup_softirqd();
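The restart loop above is now bounded two ways, by elapsed time and by an iteration count, because the time source itself (jiffies) can stop advancing under stop_machine(). A stand-alone userspace analogue of that double bound (all demo_* names are invented):

#include <stdbool.h>
#include <time.h>

#define DEMO_MAX_TIME_NS  (2 * 1000 * 1000)  /* ~2 ms, cf. MAX_SOFTIRQ_TIME */
#define DEMO_MAX_RESTART  10                 /* cf. MAX_SOFTIRQ_RESTART */

static int demo_pending = 25;

static bool demo_more_work(void)  { return demo_pending > 0; }
static void demo_one_pass(void)   { if (demo_pending) demo_pending--; }
static void demo_punt(void)       { /* hand the remainder to a worker thread */ }

static long long demo_now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

void demo_process(void)
{
        long long end = demo_now_ns() + DEMO_MAX_TIME_NS;
        int max_restart = DEMO_MAX_RESTART;

restart:
        demo_one_pass();

        if (demo_more_work()) {
                /* keep looping only while BOTH limits hold: if the clock were
                 * to stall, the countdown still forces an eventual return */
                if (demo_now_ns() < end && --max_restart)
                        goto restart;
                demo_punt();
        }
}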
diff --git a/kernel/sys.c b/kernel/sys.c
index b95d3c72ba2..2bbd9a73b54 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -362,6 +362,29 @@ int unregister_reboot_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_reboot_notifier);
+/* Add backwards compatibility for stable trees. */
+#ifndef PF_NO_SETAFFINITY
+#define PF_NO_SETAFFINITY PF_THREAD_BOUND
+#endif
+
+static void migrate_to_reboot_cpu(void)
+{
+ /* The boot cpu is always logical cpu 0 */
+ int cpu = 0;
+
+ cpu_hotplug_disable();
+
+ /* Make certain the cpu I'm about to reboot on is online */
+ if (!cpu_online(cpu))
+ cpu = cpumask_first(cpu_online_mask);
+
+ /* Prevent races with other tasks migrating this task */
+ current->flags |= PF_NO_SETAFFINITY;
+
+ /* Make certain I only run on the appropriate processor */
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+}
+
/**
* kernel_restart - reboot the system
* @cmd: pointer to buffer containing command to execute for restart
@@ -373,7 +396,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
void kernel_restart(char *cmd)
{
kernel_restart_prepare(cmd);
- disable_nonboot_cpus();
+ migrate_to_reboot_cpu();
syscore_shutdown();
if (!cmd)
printk(KERN_EMERG "Restarting system.\n");
@@ -400,7 +423,7 @@ static void kernel_shutdown_prepare(enum system_states state)
void kernel_halt(void)
{
kernel_shutdown_prepare(SYSTEM_HALT);
- disable_nonboot_cpus();
+ migrate_to_reboot_cpu();
syscore_shutdown();
printk(KERN_EMERG "System halted.\n");
kmsg_dump(KMSG_DUMP_HALT);
@@ -419,7 +442,7 @@ void kernel_power_off(void)
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
if (pm_power_off_prepare)
pm_power_off_prepare();
- disable_nonboot_cpus();
+ migrate_to_reboot_cpu();
syscore_shutdown();
printk(KERN_EMERG "Power down.\n");
kmsg_dump(KMSG_DUMP_POWEROFF);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 12ff13a838c..8f5b3b98577 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -874,7 +874,6 @@ static void hardpps_update_phase(long error)
void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
struct pps_normtime pts_norm, freq_norm;
- unsigned long flags;
pts_norm = pps_normalize_ts(*phase_ts);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 24938d57766..0c739423b0f 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -511,6 +511,12 @@ again:
}
}
+ /*
+ * Remove the current cpu from the pending mask. The event is
+ * delivered immediately in tick_do_broadcast() !
+ */
+ cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);
+
/* Take care of enforced broadcast requests */
cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
cpumask_clear(tick_broadcast_force_mask);
@@ -575,8 +581,8 @@ void tick_broadcast_oneshot_control(unsigned long reason)
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
- WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
+ WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
/*
* We only reprogram the broadcast timer if we
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 98cd470bbe4..baeeb5c87cf 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -975,6 +975,14 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
+ /*
+ * On some systems the persistent_clock can not be detected at
+ * timekeeping_init by its return value, so if we see a valid
+ * value returned, update the persistent_clock_exists flag.
+ */
+ if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
+ persistent_clock_exist = true;
+
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&timekeeper_seq);
timekeeping_forward_now(tk);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b549b0f5b97..6c508ff33c6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
/*
* Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
* are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
* concurrent insertions into the ftrace_global_list.
*
* Silly Alpha and silly pointer-speculation compiler optimizations!
*/
#define do_for_each_ftrace_op(op, list) \
- op = rcu_dereference_raw(list); \
+ op = rcu_dereference_raw_notrace(list); \
do
/*
* Optimized for just a single item in the list (as that is the normal case).
*/
#define while_for_each_ftrace_op(op) \
- while (likely(op = rcu_dereference_raw((op)->next)) && \
+ while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
unlikely((op) != &ftrace_list_end))
static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
if (hlist_empty(hhd))
return NULL;
- hlist_for_each_entry_rcu(rec, hhd, node) {
+ hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
if (rec->ip == ip)
return rec;
}
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
hhd = &hash->buckets[key];
- hlist_for_each_entry_rcu(entry, hhd, hlist) {
+ hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
if (entry->ip == ip)
return entry;
}
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
struct ftrace_hash *notrace_hash;
int ret;
- filter_hash = rcu_dereference_raw(ops->filter_hash);
- notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+ filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+ notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
if ((ftrace_hash_empty(filter_hash) ||
ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
* on the hash. rcu_read_lock is too dangerous here.
*/
preempt_disable_notrace();
- hlist_for_each_entry_rcu(entry, hhd, node) {
+ hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
if (entry->ip == ip)
entry->ops->func(ip, parent_ip, &entry->data);
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4d79485b323..e71a8be4a6e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -652,8 +652,6 @@ static struct {
ARCH_TRACE_CLOCKS
};
-int trace_clock_id;
-
/*
* trace_parser_get_init - gets the buffer for trace parser
*/
@@ -843,7 +841,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
max_data->pid = tsk->pid;
- max_data->uid = task_uid(tsk);
+ /*
+ * If tsk == current, then use current_uid(), as that does not use
+ * RCU. The irq tracer can be called out of RCU scope.
+ */
+ if (tsk == current)
+ max_data->uid = current_uid();
+ else
+ max_data->uid = task_uid(tsk);
+
max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
max_data->policy = tsk->policy;
max_data->rt_priority = tsk->rt_priority;
@@ -2818,7 +2824,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
iter->iter_flags |= TRACE_FILE_ANNOTATE;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
- if (trace_clocks[trace_clock_id].in_ns)
+ if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
/* stop the trace while dumping if we are not opening "snapshot" */
@@ -3817,7 +3823,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
- if (trace_clocks[trace_clock_id].in_ns)
+ if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
iter->cpu_file = tc->cpu;
@@ -5087,7 +5093,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
- if (trace_clocks[trace_clock_id].in_ns) {
+ if (trace_clocks[tr->clock_id].in_ns) {
/* local or global for trace_clock */
t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 711ca7d3e7f..20572ed88c5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -700,8 +700,6 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
extern unsigned long trace_flags;
-extern int trace_clock_id;
-
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 55e2cf66967..2901e3b8859 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check the trace buffer */
- ret = trace_test_buffer(tr, &count);
+ ret = trace_test_buffer(&tr->trace_buffer, &count);
trace->reset(tr);
tracing_start();
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 5f9c44cdf1f..4cc6442733f 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -37,7 +37,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
mpi_limb_t a;
MPI val = NULL;
- while (nbytes >= 0 && buffer[0] == 0) {
+ while (nbytes > 0 && buffer[0] == 0) {
buffer++;
nbytes--;
}
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 538367ef137..1b24bdcb319 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -319,7 +319,7 @@ void __frontswap_invalidate_area(unsigned type)
return;
frontswap_ops->invalidate_area(type);
atomic_set(&sis->frontswap_pages, 0);
- memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+ bitmap_zero(sis->frontswap_map, sis->max);
}
clear_bit(type, need_init);
}
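bitmap_zero() takes its length in bits and converts to longs itself, whereas the old memset() computed its length by dividing a bit count by sizeof(long), which does not in general equal the bitmap's byte length. A small sketch of the idiom (the helper name is illustrative):

	#include <linux/bitmap.h>

	/* Clear an nbits-wide bitmap; bitmap_zero() clears
	 * BITS_TO_LONGS(nbits) longs internally, so no bits-to-bytes
	 * arithmetic is needed at the call site. */
	static void reset_page_bitmap(unsigned long *map, unsigned int nbits)
	{
		bitmap_zero(map, nbits);
	}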
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8feeeca668..e2bfbf73a55 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
- migration_entry_wait(mm, (pmd_t *)ptep, address);
+ migration_entry_wait_huge(mm, ptep);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 010d6c14129..194721839cf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1199,7 +1199,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
mz = mem_cgroup_zoneinfo(root, nid, zid);
iter = &mz->reclaim_iter[reclaim->priority];
- last_visited = iter->last_visited;
if (prev && reclaim->generation != iter->generation) {
iter->last_visited = NULL;
goto out_unlock;
@@ -1218,13 +1217,12 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
* is alive.
*/
dead_count = atomic_read(&root->dead_count);
- smp_rmb();
- last_visited = iter->last_visited;
- if (last_visited) {
- if ((dead_count != iter->last_dead_count) ||
- !css_tryget(&last_visited->css)) {
+ if (dead_count == iter->last_dead_count) {
+ smp_rmb();
+ last_visited = iter->last_visited;
+ if (last_visited &&
+ !css_tryget(&last_visited->css))
last_visited = NULL;
- }
}
}
@@ -3141,8 +3139,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
return -ENOMEM;
}
- INIT_WORK(&s->memcg_params->destroy,
- kmem_cache_destroy_work_func);
s->memcg_params->is_root_cache = true;
/*
diff --git a/mm/migrate.c b/mm/migrate.c
index b1f57501de9..6f0c24438bb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
* get to the page and wait until migration is finished.
* When we return from this function the fault will be retried.
*/
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
- unsigned long address)
+static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl)
{
- pte_t *ptep, pte;
- spinlock_t *ptl;
+ pte_t pte;
swp_entry_t entry;
struct page *page;
- ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+ spin_lock(ptl);
pte = *ptep;
if (!is_swap_pte(pte))
goto out;
@@ -236,6 +235,20 @@ out:
pte_unmap_unlock(ptep, ptl);
}
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ spinlock_t *ptl = pte_lockptr(mm, pmd);
+ pte_t *ptep = pte_offset_map(pmd, address);
+ __migration_entry_wait(mm, ptep, ptl);
+}
+
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+{
+ spinlock_t *ptl = &(mm)->page_table_lock;
+ __migration_entry_wait(mm, pte, ptl);
+}
+
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
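The migrate.c and hugetlb.c hunks split the wait path so a huge-page fault no longer passes its huge PTE pointer off as a pmd: the common helper takes the PTE and the lock guarding it, and each caller supplies the right lock (the possibly split per-PTE lock for normal pages, mm->page_table_lock for hugetlb in this kernel). A minimal sketch of that lock selection, not part of the patch (the helper name is made up):

	/* Pick the spinlock guarding a PTE: hugetlb PTEs are covered by
	 * mm->page_table_lock here, normal PTEs by the lock returned by
	 * pte_lockptr(). */
	static spinlock_t *pte_guard_lock(struct mm_struct *mm, pmd_t *pmd, bool huge)
	{
		return huge ? &mm->page_table_lock : pte_lockptr(mm, pmd);
	}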
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 378a15bcd64..c3edb624fcc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1628,6 +1628,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
long min = mark;
long lowmem_reserve = z->lowmem_reserve[classzone_idx];
int o;
+ long free_cma = 0;
free_pages -= (1 << order) - 1;
if (alloc_flags & ALLOC_HIGH)
@@ -1637,9 +1638,10 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
#ifdef CONFIG_CMA
/* If allocation can't use CMA areas don't use free CMA pages */
if (!(alloc_flags & ALLOC_CMA))
- free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
+ free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
#endif
- if (free_pages <= min + lowmem_reserve)
+
+ if (free_pages - free_cma <= min + lowmem_reserve)
return false;
for (o = 0; o < order; o++) {
/* At the next order, this order's pages become unavailable */
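The watermark change applies the CMA deduction only inside the threshold comparison and leaves free_pages itself unmodified for the per-order loop that follows. A tiny sketch of the corrected check (names are illustrative):

	/* Pass only if the free pages usable by this allocation stay above
	 * the watermark plus the lowmem reserve. */
	static int watermark_ok(long free_pages, long free_cma,
				long min, long lowmem_reserve)
	{
		return free_pages - free_cma > min + lowmem_reserve;
	}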
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b3d40dcf362..f24ab0dff55 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -336,8 +336,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* Swap entry may have been freed since our caller observed it.
*/
err = swapcache_prepare(entry);
- if (err == -EEXIST) { /* seems racy */
+ if (err == -EEXIST) {
radix_tree_preload_end();
+ /*
+ * We might race against get_swap_page() and stumble
+ * across a SWAP_HAS_CACHE swap_map entry whose page
+ * has not been brought into the swapcache yet, while
+ * the other end is scheduled away waiting on discard
+ * I/O completion at scan_swap_map().
+ *
+ * In order to avoid turning this transitory state
+ * into a permanent loop around this -EEXIST case
+ * if !CONFIG_PREEMPT and the I/O completion happens
+ * to be waiting on the CPU waitqueue where we are now
+ * busy looping, we just conditionally invoke the
+ * schedul