-rw-r--r--Documentation/DocBook/debugobjects.tmpl2
-rw-r--r--Documentation/cdrom/packet-writing.txt2
-rw-r--r--Documentation/driver-model/device.txt32
-rw-r--r--Documentation/fault-injection/fault-injection.txt70
-rw-r--r--Documentation/filesystems/vfat.txt5
-rw-r--r--Documentation/firmware_class/README3
-rw-r--r--Documentation/hwmon/f71882fg12
-rw-r--r--Documentation/hwmon/ibmaem2
-rw-r--r--Documentation/hwmon/sysfs-interface19
-rw-r--r--Documentation/hwmon/tmp40142
-rw-r--r--Documentation/hwmon/w83627ehf11
-rw-r--r--Documentation/i2c/busses/i2c-viapro4
-rw-r--r--Documentation/kmemcheck.txt773
-rw-r--r--Documentation/kprobes.txt6
-rw-r--r--Documentation/trace/ftrace.txt233
-rw-r--r--Documentation/trace/mmiotrace.txt26
-rw-r--r--MAINTAINERS17
-rw-r--r--arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h50
-rw-r--r--arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h377
-rw-r--r--arch/blackfin/Kconfig60
-rw-r--r--arch/blackfin/Makefile7
-rw-r--r--arch/blackfin/boot/.gitignore3
-rw-r--r--arch/blackfin/boot/Makefile31
-rw-r--r--arch/blackfin/include/asm/atomic.h16
-rw-r--r--arch/blackfin/include/asm/bfin-global.h11
-rw-r--r--arch/blackfin/include/asm/bitops.h3
-rw-r--r--arch/blackfin/include/asm/bug.h57
-rw-r--r--arch/blackfin/include/asm/cache.h11
-rw-r--r--arch/blackfin/include/asm/cacheflush.h3
-rw-r--r--arch/blackfin/include/asm/cpu.h1
-rw-r--r--arch/blackfin/include/asm/ftrace.h14
-rw-r--r--arch/blackfin/include/asm/ipipe.h28
-rw-r--r--arch/blackfin/include/asm/irq.h271
-rw-r--r--arch/blackfin/include/asm/irqflags.h63
-rw-r--r--arch/blackfin/include/asm/mutex-dec.h112
-rw-r--r--arch/blackfin/include/asm/sections.h11
-rw-r--r--arch/blackfin/include/asm/system.h4
-rw-r--r--arch/blackfin/include/asm/unistd.h3
-rw-r--r--arch/blackfin/kernel/Makefile5
-rw-r--r--arch/blackfin/kernel/bfin_dma_5xx.c4
-rw-r--r--arch/blackfin/kernel/bfin_ksyms.c5
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbmgr.c2
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbmgr.c54
-rw-r--r--arch/blackfin/kernel/early_printk.c8
-rw-r--r--arch/blackfin/kernel/ftrace-entry.S140
-rw-r--r--arch/blackfin/kernel/ftrace.c42
-rw-r--r--arch/blackfin/kernel/ipipe.c42
-rw-r--r--arch/blackfin/kernel/setup.c16
-rw-r--r--arch/blackfin/kernel/stacktrace.c53
-rw-r--r--arch/blackfin/kernel/traps.c128
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S17
-rw-r--r--arch/blackfin/lib/checksum.c2
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c12
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c5
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c5
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c5
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c5
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c5
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c5
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c5
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c5
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537.c7
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c5
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c7
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c5
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c10
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c10
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c23
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c7
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c5
-rw-r--r--arch/blackfin/mach-common/cache-c.c14
-rw-r--r--arch/blackfin/mach-common/entry.S12
-rw-r--r--arch/blackfin/mach-common/smp.c24
-rw-r--r--arch/mips/configs/bigsur_defconfig2
-rw-r--r--arch/mips/configs/mtx1_defconfig2
-rw-r--r--arch/mips/sni/eisa.c2
-rw-r--r--arch/powerpc/Kconfig5
-rw-r--r--arch/powerpc/boot/install.sh3
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig2
-rw-r--r--arch/powerpc/include/asm/atomic.h3
-rw-r--r--arch/powerpc/include/asm/hw_irq.h5
-rw-r--r--arch/powerpc/include/asm/iommu.h10
-rw-r--r--arch/powerpc/include/asm/ps3.h18
-rw-r--r--arch/powerpc/include/asm/ps3gpu.h86
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h3
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/kernel/time.c10
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c4
-rw-r--r--arch/powerpc/platforms/cell/iommu.c37
-rw-r--r--arch/powerpc/platforms/iseries/dt.c3
-rw-r--r--arch/powerpc/platforms/iseries/mf.c3
-rw-r--r--arch/powerpc/platforms/ps3/mm.c12
-rw-r--r--arch/powerpc/platforms/ps3/os-area.c142
-rw-r--r--arch/powerpc/platforms/ps3/platform.h10
-rw-r--r--arch/powerpc/platforms/ps3/setup.c1
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c15
-rw-r--r--arch/s390/Kconfig9
-rw-r--r--arch/s390/Makefile4
-rw-r--r--arch/s390/appldata/appldata_base.c119
-rw-r--r--arch/s390/include/asm/ccwdev.h19
-rw-r--r--arch/s390/include/asm/ccwgroup.h10
-rw-r--r--arch/s390/include/asm/suspend.h10
-rw-r--r--arch/s390/include/asm/system.h22
-rw-r--r--arch/s390/kernel/early.c6
-rw-r--r--arch/s390/kernel/mem_detect.c19
-rw-r--r--arch/s390/kernel/smp.c38
-rw-r--r--arch/s390/mm/pgtable.c19
-rw-r--r--arch/s390/power/Makefile8
-rw-r--r--arch/s390/power/suspend.c40
-rw-r--r--arch/s390/power/swsusp.c30
-rw-r--r--arch/s390/power/swsusp_64.c17
-rw-r--r--arch/s390/power/swsusp_asm64.S199
-rw-r--r--arch/sparc/Kconfig5
-rw-r--r--arch/sparc/configs/sparc64_defconfig63
-rw-r--r--arch/sparc/include/asm/cpudata_64.h197
-rw-r--r--arch/sparc/include/asm/dma-mapping.h168
-rw-r--r--arch/sparc/include/asm/dma-mapping_32.h60
-rw-r--r--arch/sparc/include/asm/dma-mapping_64.h154
-rw-r--r--arch/sparc/include/asm/ftrace.h11
-rw-r--r--arch/sparc/include/asm/mdesc.h3
-rw-r--r--arch/sparc/include/asm/percpu_64.h8
-rw-r--r--arch/sparc/include/asm/prom.h2
-rw-r--r--arch/sparc/include/asm/trap_block.h207
-rw-r--r--arch/sparc/include/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/Makefile2
-rw-r--r--arch/sparc/kernel/cpumap.c431
-rw-r--r--arch/sparc/kernel/cpumap.h16
-rw-r--r--arch/sparc/kernel/dma.c127
-rw-r--r--arch/sparc/kernel/ds.c3
-rw-r--r--arch/sparc/kernel/ftrace.c47
-rw-r--r--arch/sparc/kernel/head_64.S22
-rw-r--r--arch/sparc/kernel/iommu.c15
-rw-r--r--arch/sparc/kernel/irq_64.c29
-rw-r--r--arch/sparc/kernel/mdesc.c149
-rw-r--r--arch/sparc/kernel/of_device_32.c195
-rw-r--r--arch/sparc/kernel/of_device_64.c188
-rw-r--r--arch/sparc/kernel/of_device_common.c174
-rw-r--r--arch/sparc/kernel/of_device_common.h36
-rw-r--r--arch/sparc/kernel/pci_sun4v.c15
-rw-r--r--arch/sparc/kernel/prom.h1
-rw-r--r--arch/sparc/kernel/prom_64.c232
-rw-r--r--arch/sparc/kernel/prom_common.c2
-rw-r--r--arch/sparc/kernel/smp_64.c196
-rw-r--r--arch/sparc/kernel/systbls_32.S4
-rw-r--r--arch/sparc/kernel/systbls_64.S6
-rw-r--r--arch/sparc/kernel/traps_64.c170
-rw-r--r--arch/sparc/mm/init_32.c1
-rw-r--r--arch/sparc/mm/init_64.c16
-rw-r--r--arch/sparc/mm/srmmu.c3
-rw-r--r--arch/um/drivers/net_kern.c4
-rw-r--r--arch/um/drivers/ubd_kern.c4
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/include/asm/dma-mapping.h7
-rw-r--r--arch/x86/include/asm/kmemcheck.h42
-rw-r--r--arch/x86/include/asm/pgtable.h5
-rw-r--r--arch/x86/include/asm/pgtable_types.h9
-rw-r--r--arch/x86/include/asm/string_32.h8
-rw-r--r--arch/x86/include/asm/string_64.h8
-rw-r--r--arch/x86/include/asm/thread_info.h4
-rw-r--r--arch/x86/include/asm/xor.h5
-rw-r--r--arch/x86/kernel/cpu/intel.c23
-rw-r--r--arch/x86/kernel/cpuid.c6
-rw-r--r--arch/x86/kernel/microcode_core.c1
-rw-r--r--arch/x86/kernel/msr.c6
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/stacktrace.c7
-rw-r--r--arch/x86/kernel/traps.c5
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/fault.c18
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/init_32.c2
-rw-r--r--arch/x86/mm/init_64.c4
-rw-r--r--arch/x86/mm/kmemcheck/Makefile1
-rw-r--r--arch/x86/mm/kmemcheck/error.c228
-rw-r--r--arch/x86/mm/kmemcheck/error.h15
-rw-r--r--arch/x86/mm/kmemcheck/kmemcheck.c640
-rw-r--r--arch/x86/mm/kmemcheck/opcode.c106
-rw-r--r--arch/x86/mm/kmemcheck/opcode.h9
-rw-r--r--arch/x86/mm/kmemcheck/pte.c22
-rw-r--r--arch/x86/mm/kmemcheck/pte.h10
-rw-r--r--arch/x86/mm/kmemcheck/selftest.c69
-rw-r--r--arch/x86/mm/kmemcheck/selftest.h6
-rw-r--r--arch/x86/mm/kmemcheck/shadow.c162
-rw-r--r--arch/x86/mm/kmemcheck/shadow.h16
-rw-r--r--arch/x86/mm/pageattr.c2
-rw-r--r--arch/x86/mm/pgtable.c12
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-settings.c37
-rw-r--r--block/bsg.c6
-rw-r--r--block/cfq-iosched.c7
-rw-r--r--block/genhd.c10
-rw-r--r--crypto/xor.c7
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h3
-rw-r--r--drivers/acpi/acpica/aclocal.h12
-rw-r--r--drivers/acpi/acpica/acnamesp.h13
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/dsobject.c5
-rw-r--r--drivers/acpi/acpica/dsopcode.c17
-rw-r--r--drivers/acpi/acpica/dswstate.c4
-rw-r--r--drivers/acpi/acpica/evregion.c12
-rw-r--r--drivers/acpi/acpica/evxfevnt.c4
-rw-r--r--drivers/acpi/acpica/exconfig.c125
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdump.c6
-rw-r--r--drivers/acpi/acpica/exfldio.c20
-rw-r--r--drivers/acpi/acpica/exmutex.c45
-rw-r--r--drivers/acpi/acpica/exstore.c4
-rw-r--r--drivers/acpi/acpica/hwregs.c4
-rw-r--r--drivers/acpi/acpica/nsalloc.c14
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c9
-rw-r--r--drivers/acpi/acpica/nspredef.c7
-rw-r--r--drivers/acpi/acpica/nssearch.c4
-rw-r--r--drivers/acpi/acpica/nswalk.c69
-rw-r--r--drivers/acpi/acpica/nsxfname.c150
-rw-r--r--drivers/acpi/acpica/nsxfobj.c9
-rw-r--r--drivers/acpi/acpica/rscalc.c5
-rw-r--r--drivers/acpi/acpica/rsxface.c8
-rw-r--r--drivers/acpi/acpica/tbfadt.c16
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c23
-rw-r--r--drivers/acpi/acpica/utdebug.c8
-rw-r--r--drivers/acpi/acpica/utdelete.c21
-rw-r--r--drivers/acpi/acpica/utmisc.c20
-rw-r--r--drivers/acpi/acpica/utmutex.c26
-rw-r--r--drivers/base/core.c57
-rw-r--r--drivers/base/dd.c6
-rw-r--r--drivers/base/firmware_class.c27
-rw-r--r--drivers/base/platform.c7
-rw-r--r--drivers/base/sys.c8
-rw-r--r--drivers/block/aoe/aoechr.c7
-rw-r--r--drivers/block/cciss.c1
-rw-r--r--drivers/block/mg_disk.c28
-rw-r--r--drivers/block/pktcdvd.c9
-rw-r--r--drivers/block/ps3disk.c18
-rw-r--r--drivers/block/ps3vram.c168
-rw-r--r--drivers/block/xen-blkfront.c14
-rw-r--r--drivers/char/hvc_iucv.c204
-rw-r--r--drivers/char/hvcs.c6
-rw-r--r--drivers/char/hw_random/core.c1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c4
-rw-r--r--drivers/char/misc.c15
-rw-r--r--drivers/char/ps3flash.c296
-rw-r--r--drivers/char/pty.c53
-rw-r--r--drivers/char/raw.c6
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/char/tty_ioctl.c5
-rw-r--r--drivers/char/tty_ldisc.c9
-rw-r--r--drivers/eisa/pci_eisa.c2
-rw-r--r--drivers/eisa/virtual_root.c2
-rw-r--r--drivers/firewire/Makefile8
-rw-r--r--drivers/firewire/core-card.c (renamed from drivers/firewire/fw-card.c)25
-rw-r--r--drivers/firewire/core-cdev.c (renamed from drivers/firewire/fw-cdev.c)13
-rw-r--r--drivers/firewire/core-device.c (renamed from drivers/firewire/fw-device.c)154
-rw-r--r--drivers/firewire/core-iso.c (renamed from drivers/firewire/fw-iso.c)6
-rw-r--r--drivers/firewire/core-topology.c (renamed from drivers/firewire/fw-topology.c)24
-rw-r--r--drivers/firewire/core-transaction.c (renamed from drivers/firewire/fw-transaction.c)42
-rw-r--r--drivers/firewire/core.h293
-rw-r--r--drivers/firewire/fw-device.h202
-rw-r--r--drivers/firewire/fw-topology.h77
-rw-r--r--drivers/firewire/fw-transaction.h446
-rw-r--r--drivers/firewire/ohci.c (renamed from drivers/firewire/fw-ohci.c)19
-rw-r--r--drivers/firewire/ohci.h (renamed from drivers/firewire/fw-ohci.h)6
-rw-r--r--drivers/firewire/sbp2.c (renamed from drivers/firewire/fw-sbp2.c)64
-rw-r--r--drivers/gpu/drm/drm_debugfs.c12
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c2
-rw-r--r--drivers/gpu/drm/drm_sysfs.c7
-rw-r--r--drivers/hid/usbhid/hiddev.c7
-rw-r--r--drivers/hwmon/Kconfig16
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/f71882fg.c250
-rw-r--r--drivers/hwmon/hwmon.c29
-rw-r--r--drivers/hwmon/ibmaem.c1
-rw-r--r--drivers/hwmon/max6650.c86
-rw-r--r--drivers/hwmon/sht15.c10
-rw-r--r--drivers/hwmon/tmp401.c690
-rw-r--r--drivers/hwmon/w83627ehf.c10
-rw-r--r--drivers/i2c/busses/Kconfig6
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/busses/i2c-voodoo3.c1
-rw-r--r--drivers/i2c/chips/Kconfig15
-rw-r--r--drivers/i2c/chips/Makefile1
-rw-r--r--drivers/i2c/i2c-core.c43
-rw-r--r--drivers/ide/ide-pm.c6
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ide/ide_platform.c2
-rw-r--r--drivers/ieee1394/csr1212.c2
-rw-r--r--drivers/ieee1394/eth1394.c16
-rw-r--r--drivers/ieee1394/nodemgr.c5
-rw-r--r--drivers/ieee1394/sbp2.c8
-rw-r--r--drivers/infiniband/core/sysfs.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c8
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/input/xen-kbdfront.c8
-rw-r--r--drivers/macintosh/therm_adt746x.c84
-rw-r--r--drivers/macintosh/therm_pm72.c95
-rw-r--r--drivers/macintosh/therm_windtunnel.c126
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c129
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c103
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c109
-rw-r--r--drivers/md/dm-ioctl.c1
-rw-r--r--drivers/md/dm.c1
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c10
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/dvb/firewire/firedtv-1394.c4
-rw-r--r--drivers/media/dvb/firewire/firedtv-dvb.c2
-rw-r--r--drivers/media/video/dabusb.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c22
-rw-r--r--drivers/mfd/htc-pasic3.c4
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/eeprom/Kconfig14
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/max6875.c (renamed from drivers/i2c/chips/max6875.c)2
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/ps3_gelic_net.c8
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig5
-rw-r--r--drivers/net/wireless/libertas/README12
-rw-r--r--drivers/net/wireless/libertas/if_spi.c8
-rw-r--r--drivers/net/wireless/libertas/if_spi.h3
-rw-r--r--drivers/net/wireless/libertas/if_usb.c8
-rw-r--r--drivers/net/xen-netfront.c10
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/parport/parport_gsc.c4
-rw-r--r--drivers/pci/pci.c5
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pcmcia/ds.c20
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/ps3/ps3av.c8
-rw-r--r--drivers/ps3/ps3av_cmd.c3
-rw-r--r--drivers/s390/block/dasd.c109
-rw-r--r--drivers/s390/block/dasd_devmap.c1
-rw-r--r--drivers/s390/block/dasd_eckd.c108
-rw-r--r--drivers/s390/block/dasd_fba.c6
-rw-r--r--drivers/s390/block/dasd_int.h13
-rw-r--r--drivers/s390/block/dcssblk.c132
-rw-r--r--drivers/s390/block/xpram.c129
-rw-r--r--drivers/s390/char/con3215.c221
-rw-r--r--drivers/s390/char/con3270.c12
-rw-r--r--drivers/s390/char/fs3270.c16
-rw-r--r--drivers/s390/char/monreader.c140
-rw-r--r--drivers/s390/char/monwriter.c98
-rw-r--r--drivers/s390/char/raw3270.c100
-rw-r--r--drivers/s390/char/raw3270.h12
-rw-r--r--drivers/s390/char/sclp.c248
-rw-r--r--drivers/s390/char/sclp.h23
-rw-r--r--drivers/s390/char/sclp_cmd.c42
-rw-r--r--drivers/s390/char/sclp_con.c139
-rw-r--r--drivers/s390/char/sclp_rw.c20
-rw-r--r--drivers/s390/char/sclp_rw.h12
-rw-r--r--drivers/s390/char/sclp_vt220.c118
-rw-r--r--drivers/s390/char/tape.h3
-rw-r--r--drivers/s390/char/tape_34xx.c5
-rw-r--r--drivers/s390/char/tape_3590.c5
-rw-r--r--drivers/s390/char/tape_core.c74
-rw-r--r--drivers/s390/char/vmlogrdr.c39
-rw-r--r--drivers/s390/char/vmur.c42
-rw-r--r--drivers/s390/char/vmwatchdog.c81
-rw-r--r--drivers/s390/cio/ccwgroup.c78
-rw-r--r--drivers/s390/cio/chsc.c3
-rw-r--r--drivers/s390/cio/chsc.h1
-rw-r--r--drivers/s390/cio/chsc_sch.c32
-rw-r--r--drivers/s390/cio/cmf.c5
-rw-r--r--drivers/s390/cio/css.c157
-rw-r--r--drivers/s390/cio/css.h10
-rw-r--r--drivers/s390/cio/device.c260
-rw-r--r--drivers/s390/cio/device.h3
-rw-r--r--drivers/s390/cio/device_fsm.c96
-rw-r--r--drivers/s390/cio/device_ops.c26
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/net/claw.c66
-rw-r--r--drivers/s390/net/ctcm_main.c37
-rw-r--r--drivers/s390/net/lcs.c94
-rw-r--r--drivers/s390/net/lcs.h4
-rw-r--r--drivers/s390/net/netiucv.c162
-rw-r--r--drivers/s390/net/qeth_core_main.c51
-rw-r--r--drivers/s390/net/qeth_l2_main.c52
-rw-r--r--drivers/s390/net/qeth_l3_main.c52
-rw-r--r--drivers/s390/net/smsgiucv.c63
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c33
-rw-r--r--drivers/sbus/char/openprom.c2
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c2
-rw-r--r--drivers/scsi/libsrp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c3
-rw-r--r--drivers/serial/atmel_serial.c8
-rw-r--r--drivers/serial/imx.c12
-rw-r--r--drivers/serial/of_serial.c4
-rw-r--r--drivers/spi/spi_bfin5xx.c4
-rw-r--r--drivers/staging/uc2322/aten2011.c4
-rw-r--r--drivers/thermal/thermal_sys.c4
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/ueagle-atm.c9
-rw-r--r--drivers/usb/class/cdc-acm.c71
-rw-r--r--drivers/usb/class/cdc-acm.h2
-rw-r--r--drivers/usb/class/usblp.c6
-rw-r--r--drivers/usb/class/usbtmc.c6
-rw-r--r--drivers/usb/core/Kconfig16
-rw-r--r--drivers/usb/core/Makefile4
-rw-r--r--drivers/usb/core/config.c192
-rw-r--r--drivers/usb/core/driver.c56
-rw-r--r--drivers/usb/core/endpoint.c160
-rw-r--r--drivers/usb/core/file.c13
-rw-r--r--drivers/usb/core/hcd-pci.c244
-rw-r--r--drivers/usb/core/hcd.c220
-rw-r--r--drivers/usb/core/hcd.h55
-rw-r--r--drivers/usb/core/hub.c134
-rw-r--r--drivers/usb/core/hub.h3
-rw-r--r--drivers/usb/core/message.c194
-rw-r--r--drivers/usb/core/sysfs.c12
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/core/usb.c87
-rw-r--r--drivers/usb/core/usb.h13
-rw-r--r--drivers/usb/gadget/Kconfig53
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/at91_udc.c10
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c20
-rw-r--r--drivers/usb/gadget/audio.c302
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c6
-rw-r--r--drivers/usb/gadget/f_audio.c707
-rw-r--r--drivers/usb/gadget/f_rndis.c4
-rw-r--r--drivers/usb/gadget/file_storage.c93
-rw-r--r--drivers/usb/gadget/fsl_mx3_udc.c95
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c (renamed from drivers/usb/gadget/fsl_usb2_udc.c)69
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h18
-rw-r--r--drivers/usb/gadget/gadget_chips.h8
-rw-r--r--drivers/usb/gadget/goku_udc.c6
-rw-r--r--drivers/usb/gadget/imx_udc.c14
-rw-r--r--drivers/usb/gadget/inode.c14
-rw-r--r--drivers/usb/gadget/langwell_udc.c3373
-rw-r--r--drivers/usb/gadget/langwell_udc.h228
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c71
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c3269
-rw-r--r--drivers/usb/gadget/u_audio.c319
-rw-r--r--drivers/usb/gadget/u_audio.h56
-rw-r--r--drivers/usb/gadget/u_serial.c1
-rw-r--r--drivers/usb/host/Kconfig20
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c1
-rw-r--r--drivers/usb/host/ehci-fsl.c1
-rw-r--r--drivers/usb/host/ehci-hcd.c47
-rw-r--r--drivers/usb/host/ehci-hub.c4
-rw-r--r--drivers/usb/host/ehci-ixp4xx.c1
-rw-r--r--drivers/usb/host/ehci-orion.c3
-rw-r--r--drivers/usb/host/ehci-pci.c27
-rw-r--r--drivers/usb/host/ehci-ppc-of.c1
-rw-r--r--drivers/usb/host/ehci-ps3.c8
-rw-r--r--drivers/usb/host/ehci-q.c19
-rw-r--r--drivers/usb/host/ehci-sched.c8
-rw-r--r--drivers/usb/host/ehci.h1
-rw-r--r--drivers/usb/host/fhci-dbg.c2
-rw-r--r--drivers/usb/host/hwa-hc.c21
-rw-r--r--drivers/usb/host/ohci-dbg.c31
-rw-r--r--drivers/usb/host/ohci-hcd.c38
-rw-r--r--drivers/usb/host/ohci-pci.c24
-rw-r--r--drivers/usb/host/ohci-ps3.c7
-rw-r--r--drivers/usb/host/pci-quirks.c123
-rw-r--r--drivers/usb/host/r8a66597-hcd.c62
-rw-r--r--drivers/usb/host/r8a66597.h38
-rw-r--r--drivers/usb/host/uhci-hcd.c23
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/xhci-dbg.c485
-rw-r--r--drivers/usb/host/xhci-ext-caps.h145
-rw-r--r--drivers/usb/host/xhci-hcd.c1274
-rw-r--r--drivers/usb/host/xhci-hub.c308
-rw-r--r--drivers/usb/host/xhci-mem.c769
-rw-r--r--drivers/usb/host/xhci-pci.c166
-rw-r--r--drivers/usb/host/xhci-ring.c1648
-rw-r--r--drivers/usb/host/xhci.h1157
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig2
-rw-r--r--drivers/usb/misc/usbtest.c39
-rw-r--r--drivers/usb/mon/mon_text.c2
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/blackfin.c11
-rw-r--r--drivers/usb/musb/cppi_dma.c34
-rw-r--r--drivers/usb/musb/cppi_dma.h6
-rw-r--r--drivers/usb/musb/davinci.c54
-rw-r--r--drivers/usb/musb/musb_core.c228
-rw-r--r--drivers/usb/musb/musb_core.h22
-rw-r--r--drivers/usb/musb/musb_gadget.c45
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c45
-rw-r--r--drivers/usb/musb/musb_host.c273
-rw-r--r--drivers/usb/musb/musb_host.h1
-rw-r--r--drivers/usb/musb/musb_virthub.c35
-rw-r--r--drivers/usb/musb/omap2430.c71
-rw-r--r--drivers/usb/musb/tusb6010.c70
-rw-r--r--drivers/usb/otg/Kconfig14
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/langwell_otg.c1915
-rw-r--r--drivers/usb/otg/nop-usb-xceiv.c25
-rw-r--r--drivers/usb/otg/twl4030-usb.c28
-rw-r--r--drivers/usb/serial/aircable.c5
-rw-r--r--drivers/usb/serial/belkin_sa.c7
-rw-r--r--drivers/usb/serial/bus.c27
-rw-r--r--drivers/usb/serial/cp210x.c6
-rw-r--r--drivers/usb/serial/cyberjack.c20
-rw-r--r--drivers/usb/serial/cypress_m8.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c20
-rw-r--r--drivers/usb/serial/empeg.c8
-rw-r--r--drivers/usb/serial/ftdi_sio.c179
-rw-r--r--drivers/usb/serial/ftdi_sio.h13
-rw-r--r--drivers/usb/serial/garmin_gps.c214
-rw-r--r--drivers/usb/serial/generic.c186
-rw-r--r--drivers/usb/serial/io_edgeport.c29
-rw-r--r--drivers/usb/serial/io_tables.h12
-rw-r--r--drivers/usb/serial/io_ti.c22
-rw-r--r--drivers/usb/serial/ipaq.c7
-rw-r--r--drivers/usb/serial/iuu_phoenix.c6
-rw-r--r--drivers/usb/serial/keyspan.c13
-rw-r--r--drivers/usb/serial/keyspan.h12
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/kl5kusb105.c39
-rw-r--r--drivers/usb/serial/kobil_sct.c12
-rw-r--r--drivers/usb/serial/mct_u232.c13
-rw-r--r--drivers/usb/serial/mos7720.c9
-rw-r--r--drivers/usb/serial/mos7840.c358
-rw-r--r--drivers/usb/serial/omninet.c19
-rw-r--r--drivers/usb/serial/opticon.c14
-rw-r--r--drivers/usb/serial/option.c41
-rw-r--r--drivers/usb/serial/oti6858.c7
-rw-r--r--drivers/usb/serial/pl2303.c10
-rw-r--r--drivers/usb/serial/sierra.c185
-rw-r--r--drivers/usb/serial/spcp8x5.c5
-rw-r--r--drivers/usb/serial/symbolserial.c14
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c10
-rw-r--r--drivers/usb/serial/usb-serial.c53
-rw-r--r--drivers/usb/serial/usb_debug.c41
-rw-r--r--drivers/usb/serial/visor.c13
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/storage/initializers.c14
-rw-r--r--drivers/usb/storage/option_ms.c124
-rw-r--r--drivers/usb/storage/sierra_ms.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/video/bw2.c20
-rw-r--r--drivers/video/cg14.c19
-rw-r--r--drivers/video/cg3.c20
-rw-r--r--drivers/video/cg6.c25
-rw-r--r--drivers/video/leo.c14
-rw-r--r--drivers/video/p9100.c20
-rw-r--r--drivers/video/ps3fb.c272
-rw-r--r--drivers/video/tdfxfb.c1
-rw-r--r--drivers/video/xen-fbfront.c8
-rw-r--r--fs/befs/linuxvfs.c20
-rw-r--r--fs/bio.c1
-rw-r--r--fs/btrfs/disk-io.c26
-rw-r--r--fs/btrfs/transaction.c12
-rw-r--r--fs/debugfs/file.c65
-rw-r--r--fs/debugfs/inode.c11
-rw-r--r--fs/fat/cache.c6
-rw-r--r--fs/fat/dir.c31
-rw-r--r--fs/fat/fat.h7
-rw-r--r--fs/fat/fatent.c4
-rw-r--r--fs/fat/file.c184
-rw-r--r--fs/fat/inode.c28
-rw-r--r--fs/fat/misc.c22
-rw-r--r--fs/fat/namei_msdos.c2
-rw-r--r--fs/fat/namei_vfat.c6
-rw-r--r--fs/fcntl.c33
-rw-r--r--fs/isofs/joliet.c36
-rw-r--r--fs/jfs/jfs_extent.c1
-rw-r--r--fs/ncpfs/ncplib_kernel.c8
-rw-r--r--fs/nls/nls_base.c166
-rw-r--r--fs/nls/nls_utf8.c13
-rw-r--r--fs/ocfs2/alloc.c80
-rw-r--r--fs/ocfs2/blockcheck.c184
-rw-r--r--fs/ocfs2/blockcheck.h29
-rw-r--r--fs/ocfs2/cluster/masklog.h35
-rw-r--r--fs/ocfs2/cluster/tcp.c7
-rw-r--r--fs/ocfs2/dir.c21
-rw-r--r--fs/ocfs2/dlmglue.c51
-rw-r--r--fs/ocfs2/dlmglue.h11
-rw-r--r--fs/ocfs2/file.c56
-rw-r--r--fs/ocfs2/journal.c111
-rw-r--r--fs/ocfs2/journal.h4
-rw-r--r--fs/ocfs2/ocfs2.h16
-rw-r--r--fs/ocfs2/ocfs2_lockid.h5
-rw-r--r--fs/ocfs2/quota_global.c4
-rw-r--r--fs/ocfs2/quota_local.c21
-rw-r--r--fs/ocfs2/super.c66
-rw-r--r--fs/ocfs2/xattr.c5
-rw-r--r--fs/sysfs/symlink.c5
-rw-r--r--fs/ubifs/super.c3
-rw-r--r--include/acpi/acpixf.h9
-rw-r--r--include/acpi/actypes.h20
-rw-r--r--include/acpi/platform/acgcc.h4
-rw-r--r--include/acpi/platform/aclinux.h63
-rw-r--r--include/asm-generic/atomic64.h42
-rw-r--r--include/linux/bio.h5
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/c2port.h3
-rw-r--r--include/linux/device.h5
-rw-r--r--include/linux/eisa.h4
-rw-r--r--include/linux/firewire.h358
-rw-r--r--include/linux/firmware.h1
-rw-r--r--include/linux/fs.h5
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/gfp.h14
-rw-r--r--include/linux/interrupt.h14
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/kmemcheck.h153
-rw-r--r--include/linux/mg_disk.h45
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mm_types.h8
-rw-r--r--include/linux/nls.h35
-rw-r--r--include/linux/pci.h8
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/platform_device.h4
-rw-r--r--include/linux/ring_buffer.h4
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/slab.h7
-rw-r--r--include/linux/slab_def.h81
-rw-r--r--include/linux/stacktrace.h3
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/usb.h37
-rw-r--r--include/linux/usb/audio.h265
-rw-r--r--include/linux/usb/ch9.h17
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/langwell_otg.h177
-rw-r--r--include/linux/usb/langwell_udc.h310
-rw-r--r--include/linux/usb/otg.h4
-rw-r--r--include/linux/usb/r8a66597.h44
-rw-r--r--include/linux/usb/serial.h32
-rw-r--r--include/net/inet_sock.h14
-rw-r--r--include/net/inet_timewait_sock.h5
-rw-r--r--include/net/sock.h2
-rw-r--r--init/Kconfig6
-rw-r--r--init/do_mounts.c3
-rw-r--r--init/main.c1
-rw-r--r--kernel/fork.c14
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/printk.c33
-rw-r--r--kernel/signal.c11
-rw-r--r--kernel/softirq.c11
-rw-r--r--kernel/sysctl.c12
-rw-r--r--kernel/trace/Kconfig10
-rw-r--r--kernel/trace/ring_buffer.c3
-rw-r--r--kernel/trace/trace.c23
-rw-r--r--kernel/user.c67
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Kconfig.debug6
-rw-r--r--lib/Kconfig.kmemcheck91
-rw-r--r--lib/Makefile2
-rw-r--r--lib/atomic64.c175
-rw-r--r--lib/kobject.c7
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/Kconfig.debug1
-rw-r--r--mm/Makefile1
-rw-r--r--mm/bounce.c1
-rw-r--r--mm/highmem.c1
-rw-r--r--mm/kmemcheck.c122
-rw-r--r--mm/page_alloc.c18
-rw-r--r--mm/slab.c108
-rw-r--r--mm/slub.c38
-rw-r--r--net/core/skbuff.c8
-rw-r--r--net/core/sock.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c3
-rw-r--r--net/iucv/af_iucv.c147
-rw-r--r--net/iucv/iucv.c268
-rw-r--r--samples/Kconfig3
-rw-r--r--samples/firmware_class/firmware_sample_driver.c121
-rw-r--r--samples/firmware_class/firmware_sample_firmware_class.c204
-rwxr-xr-xscripts/recordmcount.pl20
-rw-r--r--scripts/tracing/draw_functrace.py7
-rw-r--r--sound/pci/ctxfi/ctatc.c22
-rw-r--r--sound/pci/ctxfi/ctatc.h4
-rw-r--r--sound/pci/ctxfi/cttimer.c18
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/pci/intel8x0.c24
-rw-r--r--sound/soc/codecs/ssm2602.c4
-rw-r--r--sound/soc/codecs/wm8903.c4
-rw-r--r--sound/soc/pxa/magician.c2
-rw-r--r--sound/soc/soc-core.c3
-rw-r--r--sound/sound_core.c7
-rw-r--r--sound/usb/usbquirks.h2
693 files changed, 35879 insertions, 7805 deletions
diff --git a/Documentation/DocBook/debugobjects.tmpl b/Documentation/DocBook/debugobjects.tmpl
index 7f5f218015f..08ff908aa7a 100644
--- a/Documentation/DocBook/debugobjects.tmpl
+++ b/Documentation/DocBook/debugobjects.tmpl
@@ -106,7 +106,7 @@
number of errors are printk'ed including a full stack trace.
</para>
<para>
- The statistics are available via debugfs/debug_objects/stats.
+ The statistics are available via /sys/kernel/debug/debug_objects/stats.
They provide information about the number of warnings and the
number of successful fixups along with information about the
usage of the internal tracking objects and the state of the
diff --git a/Documentation/cdrom/packet-writing.txt b/Documentation/cdrom/packet-writing.txt
index cf1f8126991..1c407778c8b 100644
--- a/Documentation/cdrom/packet-writing.txt
+++ b/Documentation/cdrom/packet-writing.txt
@@ -117,7 +117,7 @@ Using the pktcdvd debugfs interface
To read pktcdvd device infos in human readable form, do:
- # cat /debug/pktcdvd/pktcdvd[0-7]/info
+ # cat /sys/kernel/debug/pktcdvd/pktcdvd[0-7]/info
For a description of the debugfs interface look into the file:
diff --git a/Documentation/driver-model/device.txt b/Documentation/driver-model/device.txt
index a7cbfff40d0..a124f3126b0 100644
--- a/Documentation/driver-model/device.txt
+++ b/Documentation/driver-model/device.txt
@@ -162,3 +162,35 @@ device_remove_file(dev,&dev_attr_power);
The file name will be 'power' with a mode of 0644 (-rw-r--r--).
+Word of warning: While the kernel allows device_create_file() and
+device_remove_file() to be called on a device at any time, userspace has
+strict expectations on when attributes get created. When a new device is
+registered in the kernel, a uevent is generated to notify userspace (like
+udev) that a new device is available. If attributes are added after the
+device is registered, then userspace won't be notified and will not know
+about the new attributes.
+
+This is important for device drivers that need to publish additional
+attributes for a device at driver probe time. If the device driver simply
+calls device_create_file() on the device structure passed to it, then
+userspace will never be notified of the new attributes. Instead, it should
+probably use class_create() and class->dev_attrs to set up a list of
+desired attributes in the module init function, and then use
+device_create() in the .probe() hook to create a new device as a child
+of the probed device. The new device will generate a new uevent and
+properly advertise the new attributes to userspace.
+
+For example, if a driver wanted to add the following attributes:
+struct device_attribute mydriver_attribs[] = {
+ __ATTR(port_count, 0444, port_count_show, NULL),
+ __ATTR(serial_number, 0444, serial_number_show, NULL),
+ __ATTR_NULL
+};
+
+Then in the module init function it would do:
+ mydriver_class = class_create(THIS_MODULE, "my_attrs");
+ mydriver_class->dev_attrs = mydriver_attribs;
+
+And assuming 'dev' is the struct device passed into the probe hook, the driver
+probe function would do something like:
+ device_create(mydriver_class, dev, chrdev, &private_data, "my_name");
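+
+Putting the pieces together, a minimal skeleton following this pattern might
+look roughly like the sketch below. This is illustrative only: the names
+mydriver_*, port_count_show() and the dev_t passed to device_create() are
+placeholders, not part of any real driver API.
+
+	#include <linux/device.h>
+	#include <linux/err.h>
+	#include <linux/kdev_t.h>
+	#include <linux/kernel.h>
+	#include <linux/module.h>
+
+	static struct class *mydriver_class;
+
+	/* hypothetical show routine backing the 'port_count' attribute */
+	static ssize_t port_count_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+	{
+		return sprintf(buf, "%d\n", 4);
+	}
+
+	static struct device_attribute mydriver_attribs[] = {
+		__ATTR(port_count, 0444, port_count_show, NULL),
+		__ATTR_NULL
+	};
+
+	static int __init mydriver_init(void)
+	{
+		mydriver_class = class_create(THIS_MODULE, "my_attrs");
+		if (IS_ERR(mydriver_class))
+			return PTR_ERR(mydriver_class);
+		/* attributes set here exist before the child's uevent fires */
+		mydriver_class->dev_attrs = mydriver_attribs;
+		return 0;
+	}
+	module_init(mydriver_init);
+
+	/* to be called from the bus' .probe() hook with the probed device */
+	static int mydriver_setup_child(struct device *dev)
+	{
+		struct device *child;
+
+		child = device_create(mydriver_class, dev, MKDEV(0, 0),
+				      NULL, "my_name");
+		return IS_ERR(child) ? PTR_ERR(child) : 0;
+	}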
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 4bc374a1434..07930564079 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -29,16 +29,16 @@ o debugfs entries
fault-inject-debugfs kernel module provides some debugfs entries for runtime
configuration of fault-injection capabilities.
-- /debug/fail*/probability:
+- /sys/kernel/debug/fail*/probability:
likelihood of failure injection, in percent.
Format: <percent>
Note that one-failure-per-hundred is a very high error rate
for some testcases. Consider setting probability=100 and configure
- /debug/fail*/interval for such testcases.
+ /sys/kernel/debug/fail*/interval for such testcases.
-- /debug/fail*/interval:
+- /sys/kernel/debug/fail*/interval:
specifies the interval between failures, for calls to
should_fail() that pass all the other tests.
@@ -46,18 +46,18 @@ configuration of fault-injection capabilities.
Note that if you enable this, by setting interval>1, you will
probably want to set probability=100.
-- /debug/fail*/times:
+- /sys/kernel/debug/fail*/times:
specifies how many times failures may happen at most.
A value of -1 means "no limit".
-- /debug/fail*/space:
+- /sys/kernel/debug/fail*/space:
specifies an initial resource "budget", decremented by "size"
on each call to should_fail(,size). Failure injection is
suppressed until "space" reaches zero.
-- /debug/fail*/verbose
+- /sys/kernel/debug/fail*/verbose
Format: { 0 | 1 | 2 }
specifies the verbosity of the messages when failure is
@@ -65,17 +65,17 @@ configuration of fault-injection capabilities.
log line per failure; '2' will print a call trace too -- useful
to debug the problems revealed by fault injection.
-- /debug/fail*/task-filter:
+- /sys/kernel/debug/fail*/task-filter:
Format: { 'Y' | 'N' }
A value of 'N' disables filtering by process (default).
Any positive value limits failures to only processes indicated by
/proc/<pid>/make-it-fail==1.
-- /debug/fail*/require-start:
-- /debug/fail*/require-end:
-- /debug/fail*/reject-start:
-- /debug/fail*/reject-end:
+- /sys/kernel/debug/fail*/require-start:
+- /sys/kernel/debug/fail*/require-end:
+- /sys/kernel/debug/fail*/reject-start:
+- /sys/kernel/debug/fail*/reject-end:
specifies the range of virtual addresses tested during
stacktrace walking. Failure is injected only if some caller
@@ -84,26 +84,26 @@ configuration of fault-injection capabilities.
Default required range is [0,ULONG_MAX) (whole of virtual address space).
Default rejected range is [0,0).
-- /debug/fail*/stacktrace-depth:
+- /sys/kernel/debug/fail*/stacktrace-depth:
specifies the maximum stacktrace depth walked during search
for a caller within [require-start,require-end) OR
[reject-start,reject-end).
-- /debug/fail_page_alloc/ignore-gfp-highmem:
+- /sys/kernel/debug/fail_page_alloc/ignore-gfp-highmem:
Format: { 'Y' | 'N' }
default is 'N', setting it to 'Y' won't inject failures into
highmem/user allocations.
-- /debug/failslab/ignore-gfp-wait:
-- /debug/fail_page_alloc/ignore-gfp-wait:
+- /sys/kernel/debug/failslab/ignore-gfp-wait:
+- /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait:
Format: { 'Y' | 'N' }
default is 'N', setting it to 'Y' will inject failures
only into non-sleep allocations (GFP_ATOMIC allocations).
-- /debug/fail_page_alloc/min-order:
+- /sys/kernel/debug/fail_page_alloc/min-order:
specifies the minimum page allocation order to be injected
failures.
@@ -166,13 +166,13 @@ o Inject slab allocation failures into module init/exit code
#!/bin/bash
FAILTYPE=failslab
-echo Y > /debug/$FAILTYPE/task-filter
-echo 10 > /debug/$FAILTYPE/probability
-echo 100 > /debug/$FAILTYPE/interval
-echo -1 > /debug/$FAILTYPE/times
-echo 0 > /debug/$FAILTYPE/space
-echo 2 > /debug/$FAILTYPE/verbose
-echo 1 > /debug/$FAILTYPE/ignore-gfp-wait
+echo Y > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
faulty_system()
{
@@ -217,20 +217,20 @@ then
exit 1
fi
-cat /sys/module/$module/sections/.text > /debug/$FAILTYPE/require-start
-cat /sys/module/$module/sections/.data > /debug/$FAILTYPE/require-end
+cat /sys/module/$module/sections/.text > /sys/kernel/debug/$FAILTYPE/require-start
+cat /sys/module/$module/sections/.data > /sys/kernel/debug/$FAILTYPE/require-end
-echo N > /debug/$FAILTYPE/task-filter
-echo 10 > /debug/$FAILTYPE/probability
-echo 100 > /debug/$FAILTYPE/interval
-echo -1 > /debug/$FAILTYPE/times
-echo 0 > /debug/$FAILTYPE/space
-echo 2 > /debug/$FAILTYPE/verbose
-echo 1 > /debug/$FAILTYPE/ignore-gfp-wait
-echo 1 > /debug/$FAILTYPE/ignore-gfp-highmem
-echo 10 > /debug/$FAILTYPE/stacktrace-depth
+echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-highmem
+echo 10 > /sys/kernel/debug/$FAILTYPE/stacktrace-depth
-trap "echo 0 > /debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
+trap "echo 0 > /sys/kernel/debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
echo "Injecting errors into the module $module... (interrupt to stop)"
sleep 1000000
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index 5147be5e13c..b58b84b50fa 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -132,6 +132,11 @@ rodir -- FAT has the ATTR_RO (read-only) attribute. On Windows,
If you want to use ATTR_RO as read-only flag even for
the directory, set this option.
+errors=panic|continue|remount-ro
+ -- specify FAT behavior on critical errors: panic, continue
+ without doing anything, or remount the partition in
+ read-only mode (default behavior).
+
<bool>: 0,1,yes,no,true,false
TODO
diff --git a/Documentation/firmware_class/README b/Documentation/firmware_class/README
index c3480aa66ba..7eceaff63f5 100644
--- a/Documentation/firmware_class/README
+++ b/Documentation/firmware_class/README
@@ -77,7 +77,8 @@
seconds for the whole load operation.
- request_firmware_nowait() is also provided for convenience in
- non-user contexts.
+ user contexts to request firmware asynchronously, but can't be called
+ in atomic contexts.
about in-kernel persistence:
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
index a8321267b5b..bee4c30bc1e 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg
@@ -2,14 +2,18 @@ Kernel driver f71882fg
======================
Supported chips:
- * Fintek F71882FG and F71883FG
- Prefix: 'f71882fg'
+ * Fintek F71858FG
+ Prefix: 'f71858fg'
Addresses scanned: none, address read from Super I/O config space
Datasheet: Available from the Fintek website
* Fintek F71862FG and F71863FG
Prefix: 'f71862fg'
Addresses scanned: none, address read from Super I/O config space
Datasheet: Available from the Fintek website
+ * Fintek F71882FG and F71883FG
+ Prefix: 'f71882fg'
+ Addresses scanned: none, address read from Super I/O config space
+ Datasheet: Available from the Fintek website
* Fintek F8000
Prefix: 'f8000'
Addresses scanned: none, address read from Super I/O config space
@@ -66,13 +70,13 @@ printed when loading the driver.
Three different fan control modes are supported; the mode number is written
to the pwm#_enable file. Note that not all modes are supported on all
-chips, and some modes may only be available in RPM / PWM mode on the F8000.
+chips, and some modes may only be available in RPM / PWM mode.
Writing an unsupported mode will result in an invalid parameter error.
* 1: Manual mode
You ask for a specific PWM duty cycle / DC voltage or a specific % of
fan#_full_speed by writing to the pwm# file. This mode is only
- available on the F8000 if the fan channel is in RPM mode.
+ available on the F71858FG / F8000 if the fan channel is in RPM mode.
* 2: Normal auto mode
You can define a number of temperature/fan speed trip points, which % the
diff --git a/Documentation/hwmon/ibmaem b/Documentation/hwmon/ibmaem
index e98bdfea346..1e0d59e000b 100644
--- a/Documentation/hwmon/ibmaem
+++ b/Documentation/hwmon/ibmaem
@@ -7,7 +7,7 @@ henceforth as AEM.
Supported systems:
* Any recent IBM System X server with AEM support.
This includes the x3350, x3550, x3650, x3655, x3755, x3850 M2,
- x3950 M2, and certain HS2x/LS2x/QS2x blades. The IPMI host interface
+ x3950 M2, and certain HC10/HS2x/LS2x/QS2x blades. The IPMI host interface
driver ("ipmi-si") needs to be loaded for this driver to do anything.
Prefix: 'ibmaem'
Datasheet: Not available
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 004ee161721..dcbd502c879 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -70,6 +70,7 @@ are interpreted as 0! For more on how written strings are interpreted see the
[0-*] denotes any positive number starting from 0
[1-*] denotes any positive number starting from 1
RO read only value
+WO write only value
RW read/write value
Read/write values may be read-only for some chips, depending on the
@@ -295,6 +296,24 @@ temp[1-*]_label Suggested temperature channel label.
user-space.
RO
+temp[1-*]_lowest
+ Historical minimum temperature
+ Unit: millidegree Celsius
+ RO
+
+temp[1-*]_highest
+ Historical maximum temperature
+ Unit: millidegree Celsius
+ RO
+
+temp[1-*]_reset_history
+ Reset temp_lowest and temp_highest
+ WO
+
+temp_reset_history
+ Reset temp_lowest and temp_highest for all sensors
+ WO
+
Some chips measure temperature using external thermistors and an ADC, and
report the temperature measurement as a voltage. Converting this voltage
back to a temperature (or the other way around for limits) requires
diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401
new file mode 100644
index 00000000000..9fc44724921
--- /dev/null
+++ b/Documentation/hwmon/tmp401
@@ -0,0 +1,42 @@
+Kernel driver tmp401
+====================
+
+Supported chips:
+ * Texas Instruments TMP401
+ Prefix: 'tmp401'
+ Addresses scanned: I2C 0x4c
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp401.html
+ * Texas Instruments TMP411
+ Prefix: 'tmp411'
+ Addresses scanned: I2C 0x4c
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp411.html
+
+Authors:
+ Hans de Goede <hdegoede@redhat.com>
+ Andre Prendel <andre.prendel@gmx.de>
+
+Description
+-----------
+
+This driver implements support for the Texas Instruments TMP401 and
+TMP411 chips. These chips implement one remote and one local
+temperature sensor. Temperature is measured in degrees
+Celsius. Resolution of the remote sensor is 0.0625 degree. The local
+sensor resolution can be set to 0.5, 0.25, 0.125 or 0.0625 degree
+(changing it is not supported by the driver so far, so the default
+resolution of 0.5 degree is used).
+
+The driver provides the common sysfs-interface for temperatures (see
+/Documentation/hwmon/sysfs-interface under Temperatures).
+
+The TMP411 chip is compatible with TMP401. It provides some additional
+features.
+
+* Minimum and Maximum temperature measured since power-on, chip-reset
+
+ Exported via sysfs attributes tempX_lowest and tempX_highest.
+
+* Reset of historical minimum/maximum temperature measurements
+
+ Exported via sysfs attribute temp_reset_history. Writing 1 to this
+ file triggers a reset.
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index b6eb59384bb..02b74899eda 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -12,6 +12,10 @@ Supported chips:
Addresses scanned: ISA address retrieved from Super I/O registers
Datasheet:
http://www.nuvoton.com.tw/NR/rdonlyres/7885623D-A487-4CF9-A47F-30C5F73D6FE6/0/W83627DHG.pdf
+ * Winbond W83627DHG-P
+ Prefix: 'w83627dhg'
+ Addresses scanned: ISA address retrieved from Super I/O registers
+ Datasheet: not available
* Winbond W83667HG
Prefix: 'w83667hg'
Addresses scanned: ISA address retrieved from Super I/O registers
@@ -28,8 +32,8 @@ Description
-----------
This driver implements support for the Winbond W83627EHF, W83627EHG,
-W83627DHG and W83667HG super I/O chips. We will refer to them collectively
-as Winbond chips.
+W83627DHG, W83627DHG-P and W83667HG super I/O chips. We will refer to them
+collectively as Winbond chips.
The chips implement three temperature sensors, five fan rotation
speed sensors, ten analog voltage sensors (only nine for the 627DHG), one
@@ -135,3 +139,6 @@ done in the driver for all register addresses.
The DHG also supports PECI, where the DHG queries Intel CPU temperatures, and
the ICH8 southbridge gets that data via PECI from the DHG, so that the
southbridge drives the fans. And the DHG supports SST, a one-wire serial bus.
+
+The DHG-P has an additional automatic fan speed control mode named Smart Fan
+(TM) III+. This mode is not yet supported by the driver.
diff --git a/Documentation/i2c/busses/i2c-viapro b/Documentation/i2c/busses/i2c-viapro
index 22efedf60c8..2e758b0e945 100644
--- a/Documentation/i2c/busses/i2c-viapro
+++ b/Documentation/i2c/busses/i2c-viapro
@@ -19,6 +19,9 @@ Supported adapters:
* VIA Technologies, Inc. VX800/VX820
Datasheet: available on http://linux.via.com.tw
+ * VIA Technologies, Inc. VX855/VX875
+ Datasheet: Availability unknown
+
Authors:
Kyösti Mälkki <kmalkki@cc.hut.fi>,
Mark D. Studebaker <mdsxyz123@yahoo.com>,
@@ -53,6 +56,7 @@ Your lspci -n listing must show one of these :
device 1106:3287 (VT8251)
device 1106:8324 (CX700)
device 1106:8353 (VX800/VX820)
+ device 1106:8409 (VX855/VX875)
If none of these show up, you should look in the BIOS for settings like
enable ACPI / SMBus or even USB.
diff --git a/Documentation/kmemcheck.txt b/Documentation/kmemcheck.txt
new file mode 100644
index 00000000000..363044609da
--- /dev/null
+++ b/Documentation/kmemcheck.txt
@@ -0,0 +1,773 @@
+GETTING STARTED WITH KMEMCHECK
+==============================
+
+Vegard Nossum <vegardno@ifi.uio.no>
+
+
+Contents
+========
+0. Introduction
+1. Downloading
+2. Configuring and compiling
+3. How to use
+3.1. Booting
+3.2. Run-time enable/disable
+3.3. Debugging
+3.4. Annotating false positives
+4. Reporting errors
+5. Technical description
+
+
+0. Introduction
+===============
+
+kmemcheck is a debugging feature for the Linux Kernel. More specifically, it
+is a dynamic checker that detects and warns about some uses of uninitialized
+memory.
+
+Userspace programmers might be familiar with Valgrind's memcheck. The main
+difference between memcheck and kmemcheck is that memcheck works for userspace
+programs only, and kmemcheck works for the kernel only. The implementations
+are of course vastly different. Because of this, kmemcheck is not as accurate
+as memcheck, but it turns out to be good enough in practice to discover real
+programmer errors that the compiler is not able to find through static
+analysis.
+
+Enabling kmemcheck on a kernel will probably slow it down to the extent that
+the machine will not be usable for normal workloads such as an interactive
+desktop. kmemcheck will also cause the kernel to use about twice
+as much memory as normal. For this reason, kmemcheck is strictly a debugging
+feature.
+
+
+1. Downloading
+==============
+
+kmemcheck can only be downloaded using git. If you want to write patches
+against the current code, you should use the kmemcheck development branch of
+the tip tree. It is also possible to use the linux-next tree, which also
+includes the latest version of kmemcheck.
+
+Assuming that you've already cloned the linux-2.6.git repository, all you
+have to do is add the -tip tree as a remote, like this:
+
+ $ git remote add tip git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git
+
+To actually download the tree, fetch the remote:
+
+ $ git fetch tip
+
+And to check out a new local branch with the kmemcheck code:
+
+ $ git checkout -b kmemcheck tip/kmemcheck
+
+General instructions for the -tip tree can be found here:
+http://people.redhat.com/mingo/tip.git/readme.txt
+
+
+2. Configuring and compiling
+============================
+
+kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of
+configuration variables must have specific settings in order for the kmemcheck
+menu to even appear in "menuconfig". These are:
+
+ o CONFIG_CC_OPTIMIZE_FOR_SIZE=n
+
+ This option is located under "General setup" / "Optimize for size".
+
+ Without this, gcc will use certain optimizations that usually lead to
+ false positive warnings from kmemcheck. An example of this is a 16-bit
+ field in a struct, where gcc may load 32 bits, then discard the upper
+ 16 bits. kmemcheck sees only the 32-bit load, and may trigger a
+ warning for the upper 16 bits (if they're uninitialized).
+
+ o CONFIG_SLAB=y or CONFIG_SLUB=y
+
+ This option is located under "General setup" / "Choose SLAB
+ allocator".
+
+ o CONFIG_FUNCTION_TRACER=n
+
+ This option is located under "Kernel hacking" / "Tracers" / "Kernel
+ Function Tracer"
+
+ When function tracing is compiled in, gcc emits a call to another
+ function at the beginning of every function. This means that when the
+ page fault handler is called, the ftrace framework will be called
+ before kmemcheck has had a chance to handle the fault. If ftrace then
+ modifies memory that was tracked by kmemcheck, the result is an
+ endless recursive page fault.
+
+ o CONFIG_DEBUG_PAGEALLOC=n
+
+ This option is located under "Kernel hacking" / "Debug page memory
+ allocations".
+
+In addition, I highly recommend turning on CONFIG_DEBUG_INFO=y. This is also
+located under "Kernel hacking". With this, you will be able to get line number
+information from the kmemcheck warnings, which is extremely valuable in
+debugging a problem. This option is not mandatory, however, because it slows
+down the compilation process and produces a much bigger kernel image.
+
+Now the kmemcheck menu should be visible (under "Kernel hacking" / "kmemcheck:
+trap use of uninitialized memory"). Here follows a description of the
+kmemcheck configuration variables:
+
+ o CONFIG_KMEMCHECK
+
+ This must be enabled in order to use kmemcheck at all...
+
+ o CONFIG_KMEMCHECK_[DISABLED | ENABLED | ONESHOT]_BY_DEFAULT
+
+ This option controls the status of kmemcheck at boot-time. "Enabled"
+ will enable kmemcheck right from the start, "disabled" will boot the
+ kernel as normal (but with the kmemcheck code compiled in, so it can
+ be enabled at run-time after the kernel has booted), and "one-shot" is
+ a special mode which will turn kmemcheck off automatically after
+ detecting the first use of uninitialized memory.
+
+ If you are using kmemcheck to actively debug a problem, then you
+ probably want to choose "enabled" here.
+
+ The one-shot mode is mostly useful in automated test setups because it
+ can prevent floods of warnings and increase the chances of the machine
+ surviving in case something is really wrong. In other cases, the one-
+ shot mode could actually be counter-productive because it would turn
+ itself off at the very first error -- in the case of a false positive
+ too -- and this would come in the way of debugging the specific
+ problem you were interested in.
+
+ If you would like to use your kernel as normal, but with a chance to
+ enable kmemcheck in case of some problem, it might be a good idea to
+ choose "disabled" here. When kmemcheck is disabled, most of the run-
+ time overhead is not incurred, and the kernel will be almost as fast
+ as normal.
+
+ o CONFIG_KMEMCHECK_QUEUE_SIZE
+
+ Select the maximum number of error reports to store in an internal
+ (fixed-size) buffer. Since errors can occur virtually anywhere and in
+ any context, we need a temporary storage area which is guaranteed not
+ to generate any other page faults when accessed. The queue will be
+ emptied as soon as a tasklet may be scheduled. If the queue is full,
+ new error reports will be lost.
+
+ The default value of 64 is probably fine. If some code produces more
+ than 64 errors within an irqs-off section, then the code is likely to
+ produce many, many more, too, and these additional reports seldom give
+ any more information (the first report is usually the most valuable
+ anyway).
+
+ This number might have to be adjusted if you are not using serial
+ console or similar to capture the kernel log. If you are using the
+ "dmesg" command to save the log, then getting a lot of kmemcheck
+ warnings might overflow the kernel log itself, and the earlier reports
+ will get lost in that way instead. Try setting this to 10 or so on
+ such a setup.
+
+ o CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT
+
+ Select the number of shadow bytes to save along with each entry of the
+ error-report queue. These bytes indicate what parts of an allocation
+ are initialized, uninitialized, etc. and will be displayed when an
+ error is detected to help the debugging of a particular problem.
+
+ The number entered here is actually the logarithm of the number of
+ bytes that will be saved. So if you pick for example 5 here, kmemcheck
+ will save 2^5 = 32 bytes.
+
+ The default value should be fine for debugging most problems. It also
+ fits nicely within 80 columns.
+
+ o CONFIG_KMEMCHECK_PARTIAL_OK
+
+ This option (when enabled) works around certain GCC optimizations that
+ produce 32-bit reads from 16-bit variables where the upper 16 bits are
+ thrown away afterwards.
+
+ The default value (enabled) is recommended. This may of course hide
+ some real errors, but disabling it would probably produce a lot of
+ false positives.
+
+ o CONFIG_KMEMCHECK_BITOPS_OK
+
+ This option silences warnings that would be generated for bit-field
+ accesses where not all the bits are initialized at the same time. This
+ may also hide some real bugs.
+
+ This option is probably obsolete, or it should be replaced with
+ kmemcheck bitfield annotations (see the example below) for the
+ code in question. The
+ default value is therefore fine.
+
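+For reference, the bitfield annotations mentioned above look roughly like
+this (a sketch based on the helpers declared in <linux/kmemcheck.h>; "flags"
+is just a label naming the annotated region):
+
+	struct foo {
+		int field;
+		kmemcheck_bitfield_begin(flags);
+		unsigned int a:1;
+		unsigned int b:1;
+		kmemcheck_bitfield_end(flags);
+	};
+
+	/* after allocating a struct foo, e.g. with kmalloc(): */
+	kmemcheck_annotate_bitfield(ptr, flags);
+
+This tells kmemcheck to treat the bytes covered by the bitfield as
+initialized, so partially initialized bitfields do not trigger false
+positive warnings.
+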
+Now compile the kernel as usual.
+
+
+3. How to use
+=============
+
+3.1. Booting
+============
+
+First some information about the command-line options. There is only one
+option specific to kmemcheck, and this is called "kmemcheck". It can be used
+to override the default mode as chosen by the CONFIG_KMEMCHECK_*_BY_DEFAULT
+option. Its possible settings are:
+
+ o kmemcheck=0 (disabled)
+ o kmemcheck=1 (enabled)
+ o kmemcheck=2 (one-shot mode)
+
+If SLUB debugging has been enabled in the kernel, it may take precedence over
+kmemcheck in such a way that the slab caches which are under SLUB debugging
+will not be tracked by kmemcheck. In order to ensure that this doesn't happen
+(even though it shouldn't by default), use SLUB's boot option "slub_debug",
+like this: slub_debug=-
+
+In fact, this option may also be used for fine-grained control over SLUB vs.
+kmemcheck. For example, if the command line includes "kmemcheck=1
+slub_debug=,dentry", then SLUB debugging will be used only for the "dentry"
+slab cache, and with kmemcheck tracking all the other caches. This is advanced
+usage, however, and is not generally recommended.
+
+
+3.2. Run-time enable/disable
+============================
+
+When the kernel has booted, it is possible to enable or disable kmemcheck at
+run-time. WARNING: This feature is still experimental and may cause false
+positive warnings to appear. Therefore, try not to use this. If you find that
+it doesn't work properly (e.g. you see an unreasonable number of warnings), I
+will be happy to take bug reports.
+
+Use the file /proc/sys/kernel/kmemcheck for this purpose, e.g.:
+
+ $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck
+
+The numbers are the same as for the kmemcheck= command-line option.
+
+
+3.3. Debugging
+==============
+
+A typical report will look something like this:
+
+WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+ ^
+
+Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A
+RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
+RSP: 0018:ffff88003cdf7d98 EFLAGS: 00210002
+RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
+RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84
+RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000
+R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e
+R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8
+FS: 0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000
+CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
+CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400
+ [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
+ [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
+ [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
+ [<ffffffff8100c7b5>] int_signal+0x12/0x17
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+The single most valuable piece of information in this report is the RIP (or
+EIP on 32-bit) value. This will help us pinpoint exactly which instruction
+caused the warning.
+
+If your kernel was compiled with CONFIG_DEBUG_INFO=y, then all we have to do
+is give this address to the addr2line program, like this:
+
+ $ addr2line -e vmlinux -i ffffffff8104ede8
+ arch/x86/include/asm/string_64.h:12
+ include/asm-generic/siginfo.h:287
+ kernel/signal.c:380
+ kernel/signal.c:410
+
+The "-e vmlinux" tells addr2line which file to look in. IMPORTANT: This must
+be the vmlinux of the kernel that produced the warning in the first place! If
+not, the line number information will almost certainly be wrong.
+
+The "-i" tells addr2line to also print the line numbers of inlined functions.
+In this case, the flag was very important, because otherwise, it would only
+have printed the first line, which is just a call to memcpy(), which could be
+called from a thousand places in the kernel, and is therefore not very useful.
+These inlined functions would not show up in the stack trace above, simply
+because the kernel doesn't load the extra debugging information. This
+technique can of course be used with ordinary kernel oopses as well.
+
+In this case, it's the caller of memcpy() that is interesting, and it can be
+found in include/asm-generic/siginfo.h, line 287:
+
+281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+282 {
+283 if (from->si_code < 0)
+284 memcpy(to, from, sizeof(*to));
+285 else
+286 /* _sigchld is currently the largest know union member */
+287 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+288 }
+
+Since this was a read (kmemcheck usually warns about reads only, though it can
+warn about writes to unallocated or freed memory as well), it was probably the
+"from" argument which contained some uninitialized bytes. Following the chain
+of calls, we move upwards to see where "from" was allocated or initialized,
+kernel/signal.c, line 380:
+
+359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+360 {
+...
+367 list_for_each_entry(q, &list->list, list) {
+368 if (q->info.si_signo == sig) {
+369 if (first)
+370 goto still_pending;
+371 first = q;
+...
+377 if (first) {
+378 still_pending:
+379 list_del_init(&first->list);
+380 copy_siginfo(info, &first->info);
+381 __sigqueue_free(first);
+...
+392 }
+393 }
+
+Here, it is &first->info that is being passed on to copy_siginfo(). The
+variable "first" was found on a list -- passed in as the second argument to
+collect_signal(). We continue our journey through the stack, to figure out
+where the item on "list" was allocated or initialized. We move to line 410:
+
+395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
+396 siginfo_t *info)
+397 {
+...
+410 collect_signal(sig, pending, info);
+...
+414 }
+
+Now we need to follow the "pending" pointer, since that is being passed on to
+collect_signal() as "list". At this point, we've run out of lines from the
+"addr2line" output. Not to worry, we just paste the next addresses from the
+kmemcheck stack dump, i.e.:
+
+ [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
+ [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
+ [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
+ [<ffffffff8100c7b5>] int_signal+0x12/0x17
+
+ $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \
+ ffffffff8100b87d ffffffff8100c7b5
+ kernel/signal.c:446
+ kernel/signal.c:1806
+ arch/x86/kernel/signal.c:805
+ arch/x86/kernel/signal.c:871
+ arch/x86/kernel/entry_64.S:694
+
+Remember that since these addresses were found on the stack and not as the
+RIP value, they actually point to the _next_ instruction (they are return
+addresses). This becomes obvious when we look at the code for line 446:
+
+422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+423 {
+...
+431 signr = __dequeue_signal(&tsk->signal->shared_pending,
+432 mask, info);
+433 /*
+434 * itimer signal ?
+435 *
+436 * itimers are process shared and we restart periodic
+437 * itimers in the signal delivery path to prevent DoS
+438 * attacks in the high resolution timer case. This is
+439 * compliant with the old way of self restarting
+440 * itimers, as the SIGALRM is a legacy signal and only
+441 * queued once. Changing the restart behaviour to
+442 * restart the timer in the signal dequeue path is
+443 * reducing the timer noise on heavy loaded !highres
+444 * systems too.
+445 */
+446 if (unlikely(signr == SIGALRM)) {
+...
+489 }
+
+So instead of looking at 446, we should be looking at 431, which is the line
+that executes just before 446. Here we see that what we are looking for is
+&tsk->signal->shared_pending.
+
+Our next task is now to figure out which function puts items on this
+"shared_pending" list. A crude but efficient tool is git grep:
+
+ $ git grep -n 'shared_pending' kernel/
+ ...
+ kernel/signal.c:828: pending = group ? &t->signal->shared_pending : &t->pending;
+ kernel/signal.c:1339: pending = group ? &t->signal->shared_pending : &t->pending;
+ ...
+
+There were more results, but none of them were related to list operations,
+and these were the only assignments. We inspect the line numbers more closely
+and find that this is indeed where items are being added to the list:
+
+816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+817 int group)
+818 {
+...
+828 pending = group ? &t->signal->shared_pending : &t->pending;
+...
+851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+852 (is_si_special(info) ||
+853 info->si_code >= 0)));
+854 if (q) {
+855 list_add_tail(&q->list, &pending->list);
+...
+890 }
+
+and:
+
+1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
+1310 {
+....
+1339 pending = group ? &t->signal->shared_pending : &t->pending;
+1340 list_add_tail(&q->list, &pending->list);
+....
+1347 }
+
+In the first case, the list element we are looking for, "q", is being returned
+from the function __sigqueue_alloc(), which looks like an allocation function.
+Let's take a look at it:
+
+187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
+188 int override_rlimit)
+189 {
+190 struct sigqueue *q = NULL;
+191 struct user_struct *user;
+192
+193 /*
+194 * We won't get problems with the target's UID changing under us
+195 * because changing it requires RCU be used, and if t != current, the
+196 * caller must be holding the RCU readlock (by way of a spinlock) and
+197 * we use RCU protection here
+198 */
+199 user = get_uid(__task_cred(t)->user);
+200 atomic_inc(&user->sigpending);
+201 if (override_rlimit ||
+202 atomic_read(&user->sigpending) <=
+203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+204 q = kmem_cache_alloc(sigqueue_cachep, flags);
+205 if (unlikely(q == NULL)) {
+206 atomic_dec(&user->sigpending);
+207 free_uid(user);
+208 } else {
+209 INIT_LIST_HEAD(&q->list);
+210 q->flags = 0;
+211 q->user = user;
+212 }
+213
+214 return q;
+215 }
+
+We see that this function initializes q->list, q->flags, and q->user. It seems
+that now is the time to look at the definition of "struct sigqueue", e.g.:
+
+14 struct sigqueue {
+15 struct list_head list;
+16 int flags;
+17 siginfo_t info;
+18 struct user_struct *user;
+19 };
+
+And, you might remember, it was a memcpy() on &first->info that caused the
+warning, so this makes perfect sense. It also seems reasonable to assume that
+it is the caller of __sigqueue_alloc() that has the responsibility of filling
+out (initializing) this member.
+
+But just which fields of the struct were uninitialized? Let's look at
+kmemcheck's report again:
+
+WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+ ^
+
+The two lines following the WARNING line are the memory dump of the memory
+object itself and the shadow bytemap, respectively. The object is in this case
+&first->info. Just beware that the start of this dump is NOT the start of the
+object itself! The position of the caret (^) corresponds with the address of
+the read (ffff88003e4a2024).
+
+The shadow bytemap dump legend is as follows:
+
+ i - initialized
+ u - uninitialized
+ a - unallocated (memory has been allocated by the slab layer, but has not
+ yet been handed off to anybody)
+ f - freed (memory has been allocated by the slab layer, but has been freed
+ by the previous owner)
+
+In order to figure out where (relative to the start of the object) the
+uninitialized memory was located, we have to look at the disassembly. For
+that, we'll need the RIP address again:
+
+RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
+
+ $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8:
+ ffffffff8104edc8: mov %r8,0x8(%r8)
+ ffffffff8104edcc: test %r10d,%r10d
+ ffffffff8104edcf: js ffffffff8104ee88 <__dequeue_signal+0x168>
+ ffffffff8104edd5: mov %rax,%rdx
+ ffffffff8104edd8: mov $0xc,%ecx
+ ffffffff8104eddd: mov %r13,%rdi
+ ffffffff8104ede0: mov $0x30,%eax
+ ffffffff8104ede5: mov %rdx,%rsi
+ ffffffff8104ede8: rep movsl %ds:(%rsi),%es:(%rdi)
+ ffffffff8104edea: test $0x2,%al
+ ffffffff8104edec: je ffffffff8104edf0 <__dequeue_signal+0xd0>
+ ffffffff8104edee: movsw %ds:(%rsi),%es:(%rdi)
+ ffffffff8104edf0: test $0x1,%al
+ ffffffff8104edf2: je ffffffff8104edf5 <__dequeue_signal+0xd5>
+ ffffffff8104edf4: movsb %ds:(%rsi),%es:(%rdi)
+ ffffffff8104edf5: mov %r8,%rdi
+ ffffffff8104edf8: callq ffffffff8104de60 <__sigqueue_free>
+
+As expected, it's the "rep movsl" instruction from the memcpy() that causes
+the warning. We know that REP MOVSL uses the register RCX to count the
+number of remaining iterations. By taking a look at the register dump again
+(from the kmemcheck report), we can figure out how many bytes were left to
+copy:
+
+RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
+
+By looking at the disassembly, we also see that %ecx is being loaded with the
+value $0xc just before (ffffffff8104edd8), so we are very lucky. Keep in mind
+that this is the number of iterations, not bytes. And since this is a "long"
+operation, we need to multiply by 4 to get the number of bytes. So this means
+that the uninitialized value was encountered at 4 * (0xc - 0x9) = 12 bytes
+from the start of the object.
+
+We can now try to figure out which field of the "struct siginfo" was not
+initialized. This is the beginning of the struct:
+
+40 typedef struct siginfo {
+41 int si_signo;
+42 int si_errno;
+43 int si_code;
+44
+45 union {
+..
+92 } _sifields;
+93 } siginfo_t;
+
+On 64-bit, an int is 4 bytes long, so it must be the union member that has
+not been initialized. We can verify this using gdb:
+
+ $ gdb vmlinux
+ ...
+ (gdb) p &((struct siginfo *) 0)->_sifields
+ $1 = (union {...} *) 0x10
+
+Actually, it seems that the union member is located at offset 0x10 -- which
+means that gcc has inserted 4 bytes of padding between the members si_code
+and _sifields. We can now get a fuller picture of the memory dump:
+
+ _----------------------------=> si_code
+ / _--------------------=> (padding)
+ | / _------------=> _sifields(._kill._pid)
+ | | / _----=> _sifields(._kill._uid)
+ | | | /
+-------|-------|-------|-------|
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+
+This allows us to realize another important fact: si_code contains the value
+0x80. Remember that x86 is little endian, so the first 4 bytes "80000000" are
+really the number 0x00000080. With a bit of research, we find that this is
+actually the constant SI_KERNEL defined in include/asm-generic/siginfo.h:
+
+144 #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+
+This macro is used in exactly one place in the x86 kernel: in send_signal()
+in kernel/signal.c:
+
+816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+817 int group)
+818 {
+...
+828 pending = group ? &t->signal->shared_pending : &t->pending;
+...
+851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+852 (is_si_special(info) ||
+853 info->si_code >= 0)));
+854 if (q) {
+855 list_add_tail(&q->list, &pending->list);
+856 switch ((unsigned long) info) {
+...
+865 case (unsigned long) SEND_SIG_PRIV:
+866 q->info.si_signo = sig;
+867 q->info.si_errno = 0;
+868 q->info.si_code = SI_KERNEL;
+869 q->info.si_pid = 0;
+870 q->info.si_uid = 0;
+871 break;
+...
+890 }
+
+Not only does this match with the .si_code member, it also matches the place
+we found earlier when looking for where siginfo_t objects are enqueued on the
+"shared_pending" list.
+
+So to sum up: It seems that it is the padding introduced by the compiler
+between two struct fields that is uninitialized, and this gets reported when
+we do a memcpy() on the struct. This means that we have identified a false
+positive warning.
+
+Normally, kmemcheck will not report uninitialized accesses in memcpy() calls
+when both the source and destination addresses are tracked. (Instead, we copy
+the shadow bytemap as well). In this case, the destination address clearly
+was not tracked. We can dig a little deeper into the stack trace from above:
+
+ arch/x86/kernel/signal.c:805
+ arch/x86/kernel/signal.c:871
+ arch/x86/kernel/entry_64.S:694
+
+And we clearly see that the destination siginfo object is located on the
+stack:
+
+782 static void do_signal(struct pt_regs *regs)
+783 {
+784 struct k_sigaction ka;
+785 siginfo_t info;
+...
+804 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+...
+854 }
+
+And this &info is what eventually gets passed to copy_siginfo() as the
+destination argument.
+
+Now, even though we didn't find an actual error here, the example is still a
+good one, because it shows how one would go about finding out what the report
+was all about.
+
+
+3.4. Annotating false positives
+===============================
+
+There are a few different ways to make annotations in the source code that
+will keep kmemcheck from checking and reporting certain allocations. Here
+they are:
+
+ o __GFP_NOTRACK_FALSE_POSITIVE
+
+ This flag can be passed to kmalloc() or kmem_cache_alloc() (therefore
+ also to other functions that end up calling one of these) to indicate
+ that the allocation should not be tracked because it would lead to
+ a false positive report. This is a "big hammer" way of silencing
+ kmemcheck; after all, even if the false positive pertains to a
+ particular field in a struct, for example, we will now lose the
+ ability to find (real) errors in other parts of the same struct.
+
+ Example:
+
+ /* No warnings will ever trigger on accessing any part of x */
+ x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
+
+ o kmemcheck_bitfield_begin(name)/kmemcheck_bitfield_end(name) and
+ kmemcheck_annotate_bitfield(ptr, name)
+
+ The first two of these three macros can be used inside struct
+ definitions to signal, respectively, the beginning and end of a
+ bitfield. Additionally, this will assign the bitfield a name, which
+ is given as an argument to the macros.
+
+ Having used these markers, one can later use
+ kmemcheck_annotate_bitfield() at the point of allocation, to indicate
+ which parts of the allocation are part of a bitfield.
+
+ Example:
+
+ struct foo {
+ int x;
+
+ kmemcheck_bitfield_begin(flags);
+ int flag_a:1;
+ int flag_b:1;
+ kmemcheck_bitfield_end(flags);
+
+ int y;
+ };
+
+ struct foo *x = kmalloc(sizeof *x, GFP_KERNEL);
+
+ /* No warnings will trigger on accessing the bitfield of x */
+ kmemcheck_annotate_bitfield(x, flags);
+
+ Note that kmemcheck_annotate_bitfield() can be used even before the
+ return value of kmalloc() is checked -- in other words, passing NULL
+ as the first argument is legal (and will do nothing).
+
+
+4. Reporting errors
+===================
+
+As we have seen, kmemcheck will produce false positive reports. Therefore, it
+is not very wise to blindly post kmemcheck warnings to mailing lists and
+maintainers. Instead, I encourage maintainers and developers to find errors
+in their own code. If you get a warning, you can try to work around it, try
+to figure out if it's a real error or not, or simply ignore it. Most
+developers know their own code and will quickly and efficiently determine the
+root cause of a kmemcheck report. This is therefore also the most efficient
+way to work with kmemcheck.
+
+That said, we (the kmemcheck maintainers) will always be on the lookout for
+false positives that we can annotate and silence. So whatever you find,
+please drop us a note privately! Kernel configs and steps to reproduce (if
+available) are of course a great help too.
+
+Happy hacking!
+
+
+5. Technical description
+========================
+
+kmemcheck works by marking memory pages non-present. This means that whenever
+somebody attempts to access the page, a page fault is generated. The page
+fault handler notices that the page was in fact only hidden, and so it calls
+on the kmemcheck code to make further investigations.
+
+When the investigations are completed, kmemcheck "shows" the page by marking
+it present (as it would be under normal circumstances). This way, the
+interrupted code can continue as usual.
+
+But after the instruction has been executed, we should hide the page again, so
+that we can catch the next access too! Now kmemcheck makes use of a debugging
+feature of the processor, namely single-stepping. When the processor has
+finished the one instruction that generated the memory access, a debug
+exception is raised. From here, we simply hide the page again and continue
+execution, this time with the single-stepping feature turned off.
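+
+To illustrate the flow (and only the flow), here is a tiny self-contained
+user-space C model of the hide/show/single-step cycle described above. It is
+a sketch only: every name in it is invented for this example, and it has
+nothing to do with the real x86 page-fault and debug-exception handlers.
+
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	static bool page_hidden = true;	/* kmemcheck keeps the page non-present */
+
+	static void page_fault(void)	/* models the fault on a hidden page */
+	{
+		printf("check shadow bytes, show page, single-step\n");
+		page_hidden = false;	/* "show" the page so the access can finish */
+	}
+
+	static void debug_exception(void)	/* models the single-step trap */
+	{
+		printf("hide page again, stop single-stepping\n");
+		page_hidden = true;	/* catch the next access too */
+	}
+
+	int main(void)
+	{
+		page_fault();		/* an access to tracked memory */
+		debug_exception();	/* the one instruction has now executed */
+		return page_hidden ? 0 : 1;
+	}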
+
+kmemcheck requires some assistance from the memory allocator in order to work.
+The memory allocator needs to
+
+ 1. Tell kmemcheck about newly allocated pages and pages that are about to
+ be freed. This allows kmemcheck to set up and tear down the shadow memory
+ for the pages in question. The shadow memory stores the status of each
+ byte in the allocation proper, e.g. whether it is initialized or
+ uninitialized.
+
+ 2. Tell kmemcheck which parts of memory should be marked uninitialized.
+ There are actually a few more states, such as "not yet allocated" and
+ "recently freed".
+
+If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
+memory that can take page faults because of kmemcheck.
+
+If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
+request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags.
+This does not prevent the page faults from occurring; it does, however, mark
+the object in question as initialized so that no warnings will ever be
+produced for this object.
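+
+As a brief illustration of these two mechanisms, consider the following
+sketch. The struct, the cache name and the function are made up for the
+example, and most error handling is omitted:
+
+	#include <linux/errno.h>
+	#include <linux/slab.h>
+
+	struct my_obj {
+		int x;
+	};
+
+	static struct kmem_cache *my_cachep;
+
+	static int my_example_init(void)
+	{
+		void *p;
+
+		/* Objects from this cache never take kmemcheck page faults: */
+		my_cachep = kmem_cache_create("my_cache", sizeof(struct my_obj),
+					      0, SLAB_NOTRACK, NULL);
+
+		/* A single allocation from an ordinary (tracked) cache which
+		 * is marked initialized, so it never produces warnings: */
+		p = kmalloc(128, GFP_KERNEL | __GFP_NOTRACK);
+		kfree(p);
+
+		return my_cachep ? 0 : -ENOMEM;
+	}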
+
+Currently, the SLAB and SLUB allocators are supported by kmemcheck.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 1e7a769a10f..053037a1fe6 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -507,9 +507,9 @@ http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115)
Appendix A: The kprobes debugfs interface
With recent kernels (> 2.6.20) the list of registered kprobes is visible
-under the /debug/kprobes/ directory (assuming debugfs is mounted at /debug).
+under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at /sys/kernel/debug).
-/debug/kprobes/list: Lists all registered probes on the system
+/sys/kernel/debug/kprobes/list: Lists all registered probes on the system
c015d71a k vfs_read+0x0
c011a316 j do_fork+0x0
@@ -525,7 +525,7 @@ virtual addresses that correspond to modules that've been unloaded),
such probes are marked with [GONE]. If the probe is temporarily disabled,
such probes are marked with [DISABLED].
-/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
+/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
Provides a knob to globally and forcibly turn registered kprobes ON or OFF.
By default, all kprobes are enabled. By echoing "0" to this file, all
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 7bd27f0e288..a39b3c749de 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -7,7 +7,6 @@ Copyright 2008 Red Hat Inc.
(dual licensed under the GPL v2)
Reviewers: Elias Oltmanns, Randy Dunlap, Andrew Morton,
John Kacur, and David Teigland.
-
Written for: 2.6.28-rc2
Introduction
@@ -33,13 +32,26 @@ The File System
Ftrace uses the debugfs file system to hold the control files as
well as the files to display output.
-To mount the debugfs system:
+When debugfs is configured into the kernel (which selecting any ftrace
+option will do) the directory /sys/kernel/debug will be created. To mount
+this directory, you can add the following line to your /etc/fstab file:
+
+ debugfs /sys/kernel/debug debugfs defaults 0 0
+
+Or you can mount it at run time with:
+
+ mount -t debugfs nodev /sys/kernel/debug
- # mkdir /debug
- # mount -t debugfs nodev /debug
+For quicker access to that directory you may want to make a soft link to
+it:
-( Note: it is more common to mount at /sys/kernel/debug, but for
- simplicity this document will use /debug)
+ ln -s /sys/kernel/debug /debug
+
+Any selected ftrace option will also create a directory called tracing
+within debugfs. The rest of this document will assume that you are in the
+ftrace directory (cd /sys/kernel/debug/tracing) and will concentrate only on
+the files within that directory, rather than cluttering the examples with the
+extended "/sys/kernel/debug/tracing" path name.
That's it! (assuming that you have ftrace configured into your kernel)
@@ -389,18 +401,18 @@ trace_options
The trace_options file is used to control what gets printed in
the trace output. To see what is available, simply cat the file:
- cat /debug/tracing/trace_options
+ cat trace_options
print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj
To disable one of the options, echo in the option prepended with
"no".
- echo noprint-parent > /debug/tracing/trace_options
+ echo noprint-parent > trace_options
To enable an option, leave off the "no".
- echo sym-offset > /debug/tracing/trace_options
+ echo sym-offset > trace_options
Here are the available options:
@@ -476,11 +488,11 @@ sched_switch
This tracer simply records schedule switches. Here is an example
of how to use it.
- # echo sched_switch > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo sched_switch > current_tracer
+ # echo 1 > tracing_enabled
# sleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
# tracer: sched_switch
#
@@ -583,13 +595,13 @@ new trace is saved.
To reset the maximum, echo 0 into tracing_max_latency. Here is
an example:
- # echo irqsoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo irqsoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
# ls -ltr
[...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
# tracer: irqsoff
#
irqsoff latency trace v1.1.5 on 2.6.26
@@ -690,13 +702,13 @@ Like the irqsoff tracer, it records the maximum latency for
which preemption was disabled. The control of preemptoff tracer
is much like the irqsoff tracer.
- # echo preemptoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo preemptoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
# ls -ltr
[...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
# tracer: preemptoff
#
preemptoff latency trace v1.1.5 on 2.6.26-rc8
@@ -837,13 +849,13 @@ tracer.
Again, using this trace is much like the irqsoff and preemptoff
tracers.
- # echo preemptirqsoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo preemptirqsoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
# ls -ltr
[...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
# tracer: preemptirqsoff
#
preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
@@ -999,12 +1011,12 @@ slightly differently than we did with the previous tracers.
Instead of performing an 'ls', we will run 'sleep 1' under
'chrt' which changes the priority of the task.
- # echo wakeup > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo wakeup > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
# chrt -f 5 sleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
# tracer: wakeup
#
wakeup latency trace v1.1.5 on 2.6.26-rc8
@@ -1114,11 +1126,11 @@ can be done from the debug file system. Make sure the
ftrace_enabled is set; otherwise this tracer is a nop.
# sysctl kernel.ftrace_enabled=1
- # echo function > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo function > current_tracer
+ # echo 1 > tracing_enabled
# usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
# tracer: function
#
# TASK-PID CPU# TIMESTAMP FUNCTION
@@ -1155,7 +1167,7 @@ int trace_fd;
[...]
int main(int argc, char *argv[]) {
[...]
- trace_fd = open("/debug/tracing/tracing_enabled", O_WRONLY);
+ trace_fd = open(tracing_file("tracing_enabled"), O_WRONLY);
[...]
if (condition_hit()) {
write(trace_fd, "0", 1);
@@ -1163,26 +1175,20 @@ int main(int argc, char *argv[]) {
[...]
}
-Note: Here we hard coded the path name. The debugfs mount is not
-guaranteed to be at /debug (and is more commonly at
-/sys/kernel/debug). For simple one time traces, the above is
-sufficent. For anything else, a search through /proc/mounts may
-be needed to find where the debugfs file-system is mounted.
-
Single thread tracing
---------------------
-By writing into /debug/tracing/set_ftrace_pid you can trace a
+By writing into set_ftrace_pid you can trace a
single thread. For example:
-# cat /debug/tracing/set_ftrace_pid
+# cat set_ftrace_pid
no pid
-# echo 3111 > /debug/tracing/set_ftrace_pid
-# cat /debug/tracing/set_ftrace_pid
+# echo 3111 > set_ftrace_pid
+# cat set_ftrace_pid
3111
-# echo function > /debug/tracing/current_tracer
-# cat /debug/tracing/trace | head
+# echo function > current_tracer
+# cat trace | head
# tracer: function
#
# TASK-PID CPU# TIMESTAMP FUNCTION
@@ -1193,8 +1199,8 @@ no pid
yum-updatesd-3111 [003] 1637.254683: lock_hrtimer_base <-hrtimer_try_to_cancel
yum-updatesd-3111 [003] 1637.254685: fget_light <-do_sys_poll
yum-updatesd-3111 [003] 1637.254686: pipe_poll <-do_sys_poll
-# echo -1 > /debug/tracing/set_ftrace_pid
-# cat /debug/tracing/trace |head
+# echo -1 > set_ftrace_pid
+# cat trace |head
# tracer: function
#
# TASK-PID CPU# TIMESTAMP FUNCTION
@@ -1216,6 +1222,51 @@ something like this simple program:
#include <fcntl.h>
#include <unistd.h>
+#define _STR(x) #x
+#define STR(x) _STR(x)
+#define MAX_PATH 256
+
+const char *find_debugfs(void)
+{
+ static char debugfs[MAX_PATH+1];
+ static int debugfs_found;
+ char type[100];
+ FILE *fp;
+
+ if (debugfs_found)
+ return debugfs;
+
+ if ((fp = fopen("/proc/mounts","r")) == NULL) {
+ perror("/proc/mounts");
+ return NULL;
+ }
+
+ while (fscanf(fp, "%*s %"
+ STR(MAX_PATH)
+ "s %99s %*s %*d %*d\n",
+ debugfs, type) == 2) {
+ if (strcmp(type, "debugfs") == 0)
+ break;
+ }
+ fclose(fp);
+
+ if (strcmp(type, "debugfs") != 0) {
+ fprintf(stderr, "debugfs not mounted");
+ return NULL;
+ }
+
+ debugfs_found = 1;
+
+ return debugfs;
+}
+
+const char *tracing_file(const char *file_name)
+{
+ static char trace_file[MAX_PATH+1];
+ snprintf(trace_file, MAX_PATH, "%s/%s", find_debugfs(), file_name);
+ return trace_file;
+}
+
int main (int argc, char **argv)
{
if (argc < 1)
@@ -1226,12 +1277,12 @@ int main (int argc, char **argv)
char line[64];
int s;
- ffd = open("/debug/tracing/current_tracer", O_WRONLY);
+ ffd = open(tracing_file("current_tracer"), O_WRONLY);
if (ffd < 0)
exit(-1);
write(ffd, "nop", 3);
- fd = open("/debug/tracing/set_ftrace_pid", O_WRONLY);
+ fd = open(tracing_file("set_ftrace_pid"), O_WRONLY);
s = sprintf(line, "%d\n", getpid());
write(fd, line, s);
@@ -1383,22 +1434,22 @@ want, depending on your needs.
tracing_cpu_mask file) or you might sometimes see unordered
function calls while cpu tracing switch.
- hide: echo nofuncgraph-cpu > /debug/tracing/trace_options
- show: echo funcgraph-cpu > /debug/tracing/trace_options
+ hide: echo nofuncgraph-cpu > trace_options
+ show: echo funcgraph-cpu > trace_options
- The duration (function's time of execution) is displayed on
the closing bracket line of a function or on the same line
than the current function in case of a leaf one. It is default
enabled.
- hide: echo nofuncgraph-duration > /debug/tracing/trace_options
- show: echo funcgraph-duration > /debug/tracing/trace_options
+ hide: echo nofuncgraph-duration > trace_options
+ show: echo funcgraph-duration > trace_options
- The overhead field precedes the duration field in case of
reached duration thresholds.
- hide: echo nofuncgraph-overhead > /debug/tracing/trace_options
- show: echo funcgraph-overhead > /debug/tracing/trace_options
+ hide: echo nofuncgraph-overhead > trace_options
+ show: echo funcgraph-overhead > trace_options
depends on: funcgraph-duration
ie:
@@ -1427,8 +1478,8 @@ want, depending on your needs.
- The task/pid field displays the thread cmdline and pid which
executed the function. It is default disabled.
- hide: echo nofuncgraph-proc > /debug/tracing/trace_options
- show: echo funcgraph-proc > /debug/tracing/trace_options
+ hide: echo nofuncgraph-proc > trace_options
+ show: echo funcgraph-proc > trace_options
ie:
@@ -1451,8 +1502,8 @@ want, depending on your needs.
system clock since it started. A snapshot of this time is
given on each entry/exit of functions
- hide: echo nofuncgraph-abstime > /debug/tracing/trace_options
- show: echo funcgraph-abstime > /debug/tracing/trace_options
+ hide: echo nofuncgraph-abstime > trace_options
+ show: echo funcgraph-abstime > trace_options
ie:
@@ -1549,7 +1600,7 @@ listed in:
available_filter_functions
- # cat /debug/tracing/available_filter_functions
+ # cat available_filter_functions
put_prev_task_idle
kmem_cache_create
pick_next_task_rt
@@ -1561,12 +1612,12 @@ mutex_lock
If I am only interested in sys_nanosleep and hrtimer_interrupt:
# echo sys_nanosleep hrtimer_interrupt \
- > /debug/tracing/set_ftrace_filter
- # echo ftrace > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+ > set_ftrace_filter
+ # echo ftrace > current_tracer
+ # echo 1 > tracing_enabled
# usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
# tracer: ftrace
#
# TASK-PID CPU# TIMESTAMP FUNCTION
@@ -1577,7 +1628,7 @@ If I am only interested in sys_nanosleep and hrtimer_interrupt:
To see which functions are being traced, you can cat the file:
- # cat /debug/tracing/set_ftrace_filter
+ # cat set_ftrace_filter
hrtimer_interrupt
sys_nanosleep
@@ -1597,7 +1648,7 @@ Note: It is better to use quotes to enclose the wild cards,
otherwise the shell may expand the parameters into names
of files in the local directory.
- # echo 'hrtimer_*' > /debug/tracing/set_ftrace_filter
+ # echo 'hrtimer_*' > set_ftrace_filter
Produces:
@@ -1618,7 +1669,7 @@ Produces:
Notice that we lost the sys_nanosleep.
- # cat /debug/tracing/set_ftrace_filter
+ # cat set_ftrace_filter
hrtimer_run_queues
hrtimer_run_pending
hrtimer_init
@@ -1644,17 +1695,17 @@ To append to the filters, use '>>'
To clear out a filter so that all functions will be recorded
again:
- # echo > /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo > set_ftrace_filter
+ # cat set_ftrace_filter
#
Again, now we want to append.
- # echo sys_nanosleep > /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo sys_nanosleep > set_ftrace_filter
+ # cat set_ftrace_filter
sys_nanosleep
- # echo 'hrtimer_*' >> /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo 'hrtimer_*' >> set_ftrace_filter
+ # cat set_ftrace_filter
hrtimer_run_queues
hrtimer_run_pending
hrtimer_init
@@ -1677,7 +1728,7 @@ hrtimer_init_sleeper
The set_ftrace_notrace prevents those functions from being
traced.
- # echo '*preempt*' '*lock*' > /debug/tracing/set_ftrace_notrace
+ # echo '*preempt*' '*lock*' > set_ftrace_notrace
Produces:
@@ -1767,13 +1818,13 @@ the effect on the tracing is different. Every read from
trace_pipe is consumed. This means that subsequent reads will be
different. The trace is live.
- # echo function > /debug/tracing/current_tracer
- # cat /debug/tracing/trace_pipe > /tmp/trace.out &
+ # echo function > current_tracer
+ # cat trace_pipe > /tmp/trace.out &
[1] 4153
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo 1 > tracing_enabled
# usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
# tracer: function
#
# TASK-PID CPU# TIMESTAMP FUNCTION
@@ -1809,7 +1860,7 @@ number listed is the number of entries that can be recorded per
CPU. To know the full size, multiply the number of possible CPUS
with the number of entries.
- # cat /debug/tracing/buffer_size_kb
+ # cat buffer_size_kb
1408 (units kilobytes)
Note, to modify this, you must have tracing completely disabled.
@@ -1817,18 +1868,18 @@ To do that, echo "nop" into the current_tracer. If the
current_tracer is not set to "nop", an EINVAL error will be
returned.
- # echo nop > /debug/tracing/current_tracer
- # echo 10000 > /debug/tracing/buffer_size_kb
- # cat /debug/tracing/buffer_size_kb
+ # echo nop > current_tracer
+ # echo 10000 > buffer_size_kb
+ # cat buffer_size_kb
10000 (units kilobytes)
The number of pages which will be allocated is limited to a
percentage of available memory. Allocating too much will produce
an error.
- # echo 1000000000000 > /debug/tracing/buffer_size_kb
+ # echo 1000000000000 > buffer_size_kb
-bash: echo: write error: Cannot allocate memory
- # cat /debug/tracing/buffer_size_kb
+ # cat buffer_size_kb
85
-----------
diff --git a/Documentation/trace/mmiotrace.txt b/Documentation/trace/mmiotrace.txt
index 5731c67abc5..162effbfbde 100644
--- a/Documentation/trace/mmiotrace.txt
+++ b/Documentation/trace/mmiotrace.txt
@@ -32,41 +32,41 @@ is no way to automatically detect if you are losing events due to CPUs racing.
Usage Quick Reference
---------------------
-$ mount -t debugfs debugfs /debug
-$ echo mmiotrace > /debug/tracing/current_tracer
-$ cat /debug/tracing/trace_pipe > mydump.txt &
+$ mount -t debugfs debugfs /sys/kernel/debug
+$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
+$ cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
Start X or whatever.
-$ echo "X is up" > /debug/tracing/trace_marker
-$ echo nop > /debug/tracing/current_tracer
+$ echo "X is up" > /sys/kernel/debug/tracing/trace_marker
+$ echo nop > /sys/kernel/debug/tracing/current_tracer
Check for lost events.
Usage
-----
-Make sure debugfs is mounted to /debug. If not, (requires root privileges)
-$ mount -t debugfs debugfs /debug
+Make sure debugfs is mounted to /sys/kernel/debug. If not, (requires root privileges)
+$ mount -t debugfs debugfs /sys/kernel/debug
Check that the driver you are about to trace is not loaded.
Activate mmiotrace (requires root privileges):
-$ echo mmiotrace > /debug/tracing/current_tracer
+$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
Start storing the trace:
-$ cat /debug/tracing/trace_pipe > mydump.txt &
+$ cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
The 'cat' process should stay running (sleeping) in the background.
Load the driver you want to trace and use it. Mmiotrace will only catch MMIO
accesses to areas that are ioremapped while mmiotrace is active.
During tracing you can place comments (markers) into the trace by
-$ echo "X is up" > /debug/tracing/trace_marker
+$ echo "X is up" > /sys/kernel/debug/tracing/trace_marker
This makes it easier to see which part of the (huge) trace corresponds to
which action. It is recommended to place descriptive markers about what you
do.
Shut down mmiotrace (requires root privileges):
-$ echo nop > /debug/tracing/current_tracer
+$ echo nop > /sys/kernel/debug/tracing/current_tracer
The 'cat' process exits. If it does not, kill it by issuing 'fg' command and
pressing ctrl+c.
@@ -78,10 +78,10 @@ to view your kernel log and look for "mmiotrace has lost events" warning. If
events were lost, the trace is incomplete. You should enlarge the buffers and
try again. Buffers are enlarged by first seeing how large the current buffers
are:
-$ cat /debug/tracing/buffer_size_kb
+$ cat /sys/kernel/debug/tracing/buffer_size_kb
gives you a number. Approximately double this number and write it back, for
instance:
-$ echo 128000 > /debug/tracing/buffer_size_kb
+$ echo 128000 > /sys/kernel/debug/tracing/buffer_size_kb
Then start again from the top.
If you are doing a trace for a driver project, e.g. Nouveau, you should also
diff --git a/MAINTAINERS b/MAINTAINERS
index 06579abc85d..fb94addb34d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -163,9 +163,10 @@ S: Maintained
F: drivers/net/r8169.c
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
+P: Alan Cox
+M: alan@lxorguk.ukuu.org.uk
L: linux-serial@vger.kernel.org
W: http://serial.sourceforge.net
-M: alan@lxorguk.ukuu.org.uk
S: Odd Fixes
F: drivers/serial/8250*
F: include/linux/serial_8250.h
@@ -3383,6 +3384,14 @@ F: drivers/serial/kgdboc.c
F: include/linux/kgdb.h
F: kernel/kgdb.c
+KMEMCHECK
+P: Vegard Nossum
+M: vegardno@ifi.uio.no
+P: Pekka Enberg
+M: penberg@cs.helsinki.fi
+L: linux-kernel@vger.kernel.org
+S: Maintained
+
KMEMLEAK
P: Catalin Marinas
M: catalin.marinas@arm.com
@@ -6104,6 +6113,12 @@ L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/rndis_wlan.c
+USB XHCI DRIVER
+P: Sarah Sharp
+M: sarah.a.sharp@intel.com
+L: linux-usb@vger.kernel.org
+S: Supported
+
USB ZC0301 DRIVER
P: Luca Risolia
M: luca.risolia@studio.unibo.it
diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
new file mode 100644
index 00000000000..36a85f5000c
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
@@ -0,0 +1,50 @@
+/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device PHY registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Note, this is a separate header file as some of the clock framework
+ * needs to touch this if the clk_48m is used as the USB OHCI or other
+ * peripheral source.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H __FILE__
+
+/* S3C64XX_PA_USB_HSPHY */
+
+#define S3C_HSOTG_PHYREG(x) ((x) + S3C_VA_USB_HSPHY)
+
+#define S3C_PHYPWR S3C_HSOTG_PHYREG(0x00)
+#define SRC_PHYPWR_OTG_DISABLE (1 << 4)
+#define SRC_PHYPWR_ANALOG_POWERDOWN (1 << 3)
+#define SRC_PHYPWR_FORCE_SUSPEND (1 << 1)
+
+#define S3C_PHYCLK S3C_HSOTG_PHYREG(0x04)
+#define S3C_PHYCLK_MODE_USB11 (1 << 6)
+#define S3C_PHYCLK_EXT_OSC (1 << 5)
+#define S3C_PHYCLK_CLK_FORCE (1 << 4)
+#define S3C_PHYCLK_ID_PULL (1 << 2)
+#define S3C_PHYCLK_CLKSEL_MASK (0x3 << 0)
+#define S3C_PHYCLK_CLKSEL_SHIFT (0)
+#define S3C_PHYCLK_CLKSEL_48M (0x0 << 0)
+#define S3C_PHYCLK_CLKSEL_12M (0x2 << 0)
+#define S3C_PHYCLK_CLKSEL_24M (0x3 << 0)
+
+#define S3C_RSTCON S3C_HSOTG_PHYREG(0x08)
+#define S3C_RSTCON_PHYCLK (1 << 2)
+#define S3C_RSTCON_HCLK (1 << 2)
+#define S3C_RSTCON_PHY (1 << 0)
+
+#define S3C_PHYTUNE S3C_HSOTG_PHYREG(0x20)
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H */
diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
new file mode 100644
index 00000000000..8d18d9d4d14
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
@@ -0,0 +1,377 @@
+/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device block registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_H __FILE__
+
+#define S3C_HSOTG_REG(x) (x)
+
+#define S3C_GOTGCTL S3C_HSOTG_REG(0x000)
+#define S3C_GOTGCTL_BSESVLD (1 << 19)
+#define S3C_GOTGCTL_ASESVLD (1 << 18)
+#define S3C_GOTGCTL_DBNC_SHORT (1 << 17)
+#define S3C_GOTGCTL_CONID_B (1 << 16)
+#define S3C_GOTGCTL_DEVHNPEN (1 << 11)
+#define S3C_GOTGCTL_HSSETHNPEN (1 << 10)
+#define S3C_GOTGCTL_HNPREQ (1 << 9)
+#define S3C_GOTGCTL_HSTNEGSCS (1 << 8)
+#define S3C_GOTGCTL_SESREQ (1 << 1)
+#define S3C_GOTGCTL_SESREQSCS (1 << 0)
+
+#define S3C_GOTGINT S3C_HSOTG_REG(0x004)
+#define S3C_GOTGINT_DbnceDone (1 << 19)
+#define S3C_GOTGINT_ADevTOUTChg (1 << 18)
+#define S3C_GOTGINT_HstNegDet (1 << 17)
+#define S3C_GOTGINT_HstnegSucStsChng (1 << 9)
+#define S3C_GOTGINT_SesReqSucStsChng (1 << 8)
+#define S3C_GOTGINT_SesEndDet (1 << 2)
+
+#define S3C_GAHBCFG S3C_HSOTG_REG(0x008)
+#define S3C_GAHBCFG_PTxFEmpLvl (1 << 8)
+#define S3C_GAHBCFG_NPTxFEmpLvl (1 << 7)
+#define S3C_GAHBCFG_DMAEn (1 << 5)
+#define S3C_GAHBCFG_HBstLen_MASK (0xf << 1)
+#define S3C_GAHBCFG_HBstLen_SHIFT (1)
+#define S3C_GAHBCFG_HBstLen_Single (0x0 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr (0x1 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr4 (0x3 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr8 (0x5 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr16 (0x7 << 1)
+#define S3C_GAHBCFG_GlblIntrEn (1 << 0)
+
+#define S3C_GUSBCFG S3C_HSOTG_REG(0x00C)
+#define S3C_GUSBCFG_PHYLPClkSel (1 << 15)
+#define S3C_GUSBCFG_HNPCap (1 << 9)
+#define S3C_GUSBCFG_SRPCap (1 << 8)
+#define S3C_GUSBCFG_PHYIf16 (1 << 3)
+#define S3C_GUSBCFG_TOutCal_MASK (0x7 << 0)
+#define S3C_GUSBCFG_TOutCal_SHIFT (0)
+#define S3C_GUSBCFG_TOutCal_LIMIT (0x7)
+#define S3C_GUSBCFG_TOutCal(_x) ((_x) << 0)
+
+#define S3C_GRSTCTL S3C_HSOTG_REG(0x010)
+
+#define S3C_GRSTCTL_AHBIdle (1 << 31)
+#define S3C_GRSTCTL_DMAReq (1 << 30)
+#define S3C_GRSTCTL_TxFNum_MASK (0x1f << 6)
+#define S3C_GRSTCTL_TxFNum_SHIFT (6)
+#define S3C_GRSTCTL_TxFNum_LIMIT (0x1f)
+#define S3C_GRSTCTL_TxFNum(_x) ((_x) << 6)
+#define S3C_GRSTCTL_TxFFlsh (1 << 5)
+#define S3C_GRSTCTL_RxFFlsh (1 << 4)
+#define S3C_GRSTCTL_INTknQFlsh (1 << 3)
+#define S3C_GRSTCTL_FrmCntrRst (1 << 2)
+#define S3C_GRSTCTL_HSftRst (1 << 1)
+#define S3C_GRSTCTL_CSftRst (1 << 0)
+
+#define S3C_GINTSTS S3C_HSOTG_REG(0x014)
+#define S3C_GINTMSK S3C_HSOTG_REG(0x018)
+
+#define S3C_GINTSTS_WkUpInt (1 << 31)
+#define S3C_GINTSTS_SessReqInt (1 << 30)
+#define S3C_GINTSTS_DisconnInt (1 << 29)
+#define S3C_GINTSTS_ConIDStsChng (1 << 28)
+#define S3C_GINTSTS_PTxFEmp (1 << 26)
+#define S3C_GINTSTS_HChInt (1 << 25)
+#define S3C_GINTSTS_PrtInt (1 << 24)
+#define S3C_GINTSTS_FetSusp (1 << 22)
+#define S3C_GINTSTS_incompIP (1 << 21)
+#define S3C_GINTSTS_IncomplSOIN (1 << 20)
+#define S3C_GINTSTS_OEPInt (1 << 19)
+#define S3C_GINTSTS_IEPInt (1 << 18)
+#define S3C_GINTSTS_EPMis (1 << 17)
+#define S3C_GINTSTS_EOPF (1 << 15)
+#define S3C_GINTSTS_ISOutDrop (1 << 14)
+#define S3C_GINTSTS_EnumDone (1 << 13)
+#define S3C_GINTSTS_USBRst (1 << 12)
+#define S3C_GINTSTS_USBSusp (1 << 11)
+#define S3C_GINTSTS_ErlySusp (1 << 10)
+#define S3C_GINTSTS_GOUTNakEff (1 << 7)
+#define S3C_GINTSTS_GINNakEff (1 << 6)
+#define S3C_GINTSTS_NPTxFEmp (1 << 5)
+#define S3C_GINTSTS_RxFLvl (1 << 4)
+#define S3C_GINTSTS_SOF (1 << 3)
+#define S3C_GINTSTS_OTGInt (1 << 2)
+#define S3C_GINTSTS_ModeMis (1 << 1)
+#define S3C_GINTSTS_CurMod_Host (1 << 0)
+
+#define S3C_GRXSTSR S3C_HSOTG_REG(0x01C)
+#define S3C_GRXSTSP S3C_HSOTG_REG(0x020)
+
+#define S3C_GRXSTS_FN_MASK (0x7f << 25)
+#define S3C_GRXSTS_FN_SHIFT (25)
+
+#define S3C_GRXSTS_PktSts_MASK (0xf << 17)
+#define S3C_GRXSTS_PktSts_SHIFT (17)
+#define S3C_GRXSTS_PktSts_GlobalOutNAK (0x1 << 17)
+#define S3C_GRXSTS_PktSts_OutRX (0x2 << 17)
+#define S3C_GRXSTS_PktSts_OutDone (0x3 << 17)
+#define S3C_GRXSTS_PktSts_SetupDone (0x4 << 17)
+#define S3C_GRXSTS_PktSts_SetupRX (0x6 << 17)
+
+#define S3C_GRXSTS_DPID_MASK (0x3 << 15)
+#define S3C_GRXSTS_DPID_SHIFT (15)
+#define S3C_GRXSTS_ByteCnt_MASK (0x7ff << 4)
+#define S3C_GRXSTS_ByteCnt_SHIFT (4)
+#define S3C_GRXSTS_EPNum_MASK (0xf << 0)
+#define S3C_GRXSTS_EPNum_SHIFT (0)
+
+#define S3C_GRXFSIZ S3C_HSOTG_REG(0x024)
+
+#define S3C_GNPTXFSIZ S3C_HSOTG_REG(0x028)
+
+#define S3C_GNPTXFSIZ_NPTxFDep_MASK (0xffff << 16)
+#define S3C_GNPTXFSIZ_NPTxFDep_SHIFT (16)
+#define S3C_GNPTXFSIZ_NPTxFDep_LIMIT (0xffff)
+#define S3C_GNPTXFSIZ_NPTxFDep(_x) ((_x) << 16)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_MASK (0xffff << 0)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_SHIFT (0)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_LIMIT (0xffff)
+#define S3C_GNPTXFSIZ_NPTxFStAddr(_x) ((_x) << 0)
+
+#define S3C_GNPTXSTS S3C_HSOTG_REG(0x02C)
+
+#define S3C_GNPTXSTS_NPtxQTop_MASK (0x7f << 24)
+#define S3C_GNPTXSTS_NPtxQTop_SHIFT (24)
+
+#define S3C_GNPTXSTS_NPTxQSpcAvail_MASK (0xff << 16)
+#define S3C_GNPTXSTS_NPTxQSpcAvail_SHIFT (16)
+#define S3C_GNPTXSTS_NPTxQSpcAvail_GET(_v) (((_v) >> 16) & 0xff)
+
+#define S3C_GNPTXSTS_NPTxFSpcAvail_MASK (0xffff << 0)
+#define S3C_GNPTXSTS_NPTxFSpcAvail_SHIFT (0)
+#define S3C_GNPTXSTS_NPTxFSpcAvail_GET(_v) (((_v) >> 0) & 0xffff)
+
+
+#define S3C_HPTXFSIZ S3C_HSOTG_REG(0x100)
+
+#define S3C_DPTXFSIZn(_a) S3C_HSOTG_REG(0x104 + (((_a) - 1) * 4))
+
+#define S3C_DPTXFSIZn_DPTxFSize_MASK (0xffff << 16)
+#define S3C_DPTXFSIZn_DPTxFSize_SHIFT (16)
+#define S3C_DPTXFSIZn_DPTxFSize_GET(_v) (((_v) >> 16) & 0xffff)
+#define S3C_DPTXFSIZn_DPTxFSize_LIMIT (0xffff)
+#define S3C_DPTXFSIZn_DPTxFSize(_x) ((_x) << 16)
+
+#define S3C_DPTXFSIZn_DPTxFStAddr_MASK (0xffff << 0)
+#define S3C_DPTXFSIZn_DPTxFStAddr_SHIFT (0)
+
+/* Device mode registers */
+#define S3C_DCFG S3C_HSOTG_REG(0x800)
+
+#define S3C_DCFG_EPMisCnt_MASK (0x1f << 18)
+#define S3C_DCFG_EPMisCnt_SHIFT (18)
+#define S3C_DCFG_EPMisCnt_LIMIT (0x1f)
+#define S3C_DCFG_EPMisCnt(_x) ((_x) << 18)
+
+#define S3C_DCFG_PerFrInt_MASK (0x3 << 11)
+#define S3C_DCFG_PerFrInt_SHIFT (11)
+#define S3C_DCFG_PerFrInt_LIMIT (0x3)
+#define S3C_DCFG_PerFrInt(_x) ((_x) << 11)
+
+#define S3C_DCFG_DevAddr_MASK (0x7f << 4)
+#define S3C_DCFG_DevAddr_SHIFT (4)
+#define S3C_DCFG_DevAddr_LIMIT (0x7f)
+#define S3C_DCFG_DevAddr(_x) ((_x) << 4)
+
+#define S3C_DCFG_NZStsOUTHShk (1 << 2)
+
+#define S3C_DCFG_DevSpd_MASK (0x3 << 0)
+#define S3C_DCFG_DevSpd_SHIFT (0)
+#define S3C_DCFG_DevSpd_HS (0x0 << 0)
+#define S3C_DCFG_DevSpd_FS (0x1 << 0)
+#define S3C_DCFG_DevSpd_LS (0x2 << 0)
+#define S3C_DCFG_DevSpd_FS48 (0x3 << 0)
+
+#define S3C_DCTL S3C_HSOTG_REG(0x804)
+
+#define S3C_DCTL_PWROnPrgDone (1 << 11)
+#define S3C_DCTL_CGOUTNak (1 << 10)
+#define S3C_DCTL_SGOUTNak (1 << 9)
+#define S3C_DCTL_CGNPInNAK (1 << 8)
+#define S3C_DCTL_SGNPInNAK (1 << 7)
+#define S3C_DCTL_TstCtl_MASK (0x7 << 4)
+#define S3C_DCTL_TstCtl_SHIFT (4)
+#define S3C_DCTL_GOUTNakSts (1 << 3)
+#define S3C_DCTL_GNPINNakSts (1 << 2)
+#define S3C_DCTL_SftDiscon (1 << 1)
+#define S3C_DCTL_RmtWkUpSig (1 << 0)
+
+#define S3C_DSTS S3C_HSOTG_REG(0x808)
+
+#define S3C_DSTS_SOFFN_MASK (0x3fff << 8)
+#define S3C_DSTS_SOFFN_SHIFT (8)
+#define S3C_DSTS_SOFFN_LIMIT (0x3fff)
+#define S3C_DSTS_SOFFN(_x) ((_x) << 8)
+#define S3C_DSTS_ErraticErr (1 << 3)
+#define S3C_DSTS_EnumSpd_MASK (0x3 << 1)
+#define S3C_DSTS_EnumSpd_SHIFT (1)
+#define S3C_DSTS_EnumSpd_HS (0x0 << 1)
+#define S3C_DSTS_EnumSpd_FS (0x1 << 1)
+#define S3C_DSTS_EnumSpd_LS (0x2 << 1)
+#define S3C_DSTS_EnumSpd_FS48 (0x3 << 1)
+
+#define S3C_DSTS_SuspSts (1 << 0)
+
+#define S3C_DIEPMSK S3C_HSOTG_REG(0x810)
+
+#define S3C_DIEPMSK_INEPNakEffMsk (1 << 6)
+#define S3C_DIEPMSK_INTknEPMisMsk (1 << 5)
+#define S3C_DIEPMSK_INTknTXFEmpMsk (1 << 4)
+#define S3C_DIEPMSK_TimeOUTMsk (1 << 3)
+#define S3C_DIEPMSK_AHBErrMsk (1 << 2)
+#define S3C_DIEPMSK_EPDisbldMsk (1 << 1)
+#define S3C_DIEPMSK_XferComplMsk (1 << 0)
+
+#define S3C_DOEPMSK S3C_HSOTG_REG(0x814)
+
+#define S3C_DOEPMSK_Back2BackSetup (1 << 6)
+#define S3C_DOEPMSK_OUTTknEPdisMsk (1 << 4)
+#define S3C_DOEPMSK_SetupMsk (1 << 3)
+#define S3C_DOEPMSK_AHBErrMsk (1 << 2)
+#define S3C_DOEPMSK_EPDisbldMsk (1 << 1)
+#define S3C_DOEPMSK_XferComplMsk (1 << 0)
+
+#define S3C_DAINT S3C_HSOTG_REG(0x818)
+#define S3C_DAINTMSK S3C_HSOTG_REG(0x81C)
+
+#define S3C_DAINT_OutEP_SHIFT (16)
+#define S3C_DAINT_OutEP(x) (1 << ((x) + 16))
+#define S3C_DAINT_InEP(x) (1 << (x))
+
+#define S3C_DTKNQR1 S3C_HSOTG_REG(0x820)
+#define S3C_DTKNQR2 S3C_HSOTG_REG(0x824)
+#define S3C_DTKNQR3 S3C_HSOTG_REG(0x830)
+#define S3C_DTKNQR4 S3C_HSOTG_REG(0x834)
+
+#define S3C_DVBUSDIS S3C_HSOTG_REG(0x828)
+#define S3C_DVBUSPULSE S3C_HSOTG_REG(0x82C)
+
+#define S3C_DIEPCTL0 S3C_HSOTG_REG(0x900)
+#define S3C_DOEPCTL0 S3C_HSOTG_REG(0xB00)
+#define S3C_DIEPCTL(_a) S3C_HSOTG_REG(0x900 + ((_a) * 0x20))
+#define S3C_DOEPCTL(_a) S3C_HSOTG_REG(0xB00 + ((_a) * 0x20))
+
+/* EP0 specialness:
+ * bits[29..28] - reserved (no SetD0PID, SetD1PID)
+ * bits[25..22] - should always be zero, this isn't a periodic endpoint
+ * bits[10..0] - MPS setting different for EP0
+*/
+#define S3C_D0EPCTL_MPS_MASK (0x3 << 0)
+#define S3C_D0EPCTL_MPS_SHIFT (0)
+#define S3C_D0EPCTL_MPS_64 (0x0 << 0)
+#define S3C_D0EPCTL_MPS_32 (0x1 << 0)
+#define S3C_D0EPCTL_MPS_16 (0x2 << 0)
+#define S3C_D0EPCTL_MPS_8 (0x3 << 0)
+
+#define S3C_DxEPCTL_EPEna (1 << 31)
+#define S3C_DxEPCTL_EPDis (1 << 30)
+#define S3C_DxEPCTL_SetD1PID (1 << 29)
+#define S3C_DxEPCTL_SetOddFr (1 << 29)
+#define S3C_DxEPCTL_SetD0PID (1 << 28)
+#define S3C_DxEPCTL_SetEvenFr (1 << 28)
+#define S3C_DxEPCTL_SNAK (1 << 27)
+#define S3C_DxEPCTL_CNAK (1 << 26)
+#define S3C_DxEPCTL_TxFNum_MASK (0xf << 22)
+#define S3C_DxEPCTL_TxFNum_SHIFT (22)
+#define S3C_DxEPCTL_TxFNum_LIMIT (0xf)
+#define S3C_DxEPCTL_TxFNum(_x) ((_x) << 22)
+
+#define S3C_DxEPCTL_Stall (1 << 21)
+#define S3C_DxEPCTL_Snp (1 << 20)
+#define S3C_DxEPCTL_EPType_MASK (0x3 << 18)
+#define S3C_DxEPCTL_EPType_SHIFT (18)
+#define S3C_DxEPCTL_EPType_Control (0x0 << 18)
+#define S3C_DxEPCTL_EPType_Iso (0x1 << 18)
+#define S3C_DxEPCTL_EPType_Bulk (0x2 << 18)
+#define S3C_DxEPCTL_EPType_Intterupt (0x3 << 18)
+
+#define S3C_DxEPCTL_NAKsts (1 << 17)
+#define S3C_DxEPCTL_DPID (1 << 16)
+#define S3C_DxEPCTL_EOFrNum (1 << 16)
+#define S3C_DxEPCTL_USBActEp (1 << 15)
+#define S3C_DxEPCTL_NextEp_MASK (0xf << 11)
+#define S3C_DxEPCTL_NextEp_SHIFT (11)
+#define S3C_DxEPCTL_NextEp_LIMIT (0xf)
+#define S3C_DxEPCTL_NextEp(_x) ((_x) << 11)
+
+#define S3C_DxEPCTL_MPS_MASK (0x7ff << 0)
+#define S3C_DxEPCTL_MPS_SHIFT (0)
+#define S3C_DxEPCTL_MPS_LIMIT (0x7ff)
+#define S3C_DxEPCTL_MPS(_x) ((_x) << 0)
+
+#define S3C_DIEPINT(_a) S3C_HSOTG_REG(0x908 + ((_a) * 0x20))
+#define S3C_DOEPINT(_a) S3C_HSOTG_REG(0xB08 + ((_a) * 0x20))
+
+#define S3C_DxEPINT_INEPNakEff (1 << 6)
+#define S3C_DxEPINT_Back2BackSetup (1 << 6)
+#define S3C_DxEPINT_INTknEPMis (1 << 5)
+#define S3C_DxEPINT_INTknTXFEmp (1 << 4)
+#define S3C_DxEPINT_OUTTknEPdis (1 << 4)
+#define S3C_DxEPINT_Timeout (1 << 3)
+#define S3C_DxEPINT_Setup (1 << 3)
+#define S3C_DxEPINT_AHBErr (1 << 2)
+#define S3C_DxEPINT_EPDisbld (1 << 1)
+#define S3C_DxEPINT_XferCompl (1 << 0)
+
+#define S3C_DIEPTSIZ0 S3C_HSOTG_REG(0x910)
+
+#define S3C_DIEPTSIZ0_PktCnt_MASK (0x3 << 19)
+#define S3C_DIEPTSIZ0_PktCnt_SHIFT (19)
+#define S3C_DIEPTSIZ0_PktCnt_LIMIT (0x3)
+#define S3C_DIEPTSIZ0_PktCnt(_x) ((_x) << 19)
+
+#define S3C_DIEPTSIZ0_XferSize_MASK (0x7f << 0)
+#define S3C_DIEPTSIZ0_XferSize_SHIFT (0)
+#define S3C_DIEPTSIZ0_XferSize_LIMIT (0x7f)
+#define S3C_DIEPTSIZ0_XferSize(_x) ((_x) << 0)
+
+
+#define DOEPTSIZ0 S3C_HSOTG_REG(0xB10)
+#define S3C_DOEPTSIZ0_SUPCnt_MASK (0x3 << 29)
+#define S3C_DOEPTSIZ0_SUPCnt_SHIFT (29)
+#define S3C_DOEPTSIZ0_SUPCnt_LIMIT (0x3)
+#define S3C_DOEPTSIZ0_SUPCnt(_x) ((_x) << 29)
+
+#define S3C_DOEPTSIZ0_PktCnt (1 << 19)
+#define S3C_DOEPTSIZ0_XferSize_MASK (0x7f << 0)
+#define S3C_DOEPTSIZ0_XferSize_SHIFT (0)
+
+#define S3C_DIEPTSIZ(_a) S3C_HSOTG_REG(0x910 + ((_a) * 0x20))
+#define S3C_DOEPTSIZ(_a) S3C_HSOTG_REG(0xB10 + ((_a) * 0x20))
+
+#define S3C_DxEPTSIZ_MC_MASK (0x3 << 29)
+#define S3C_DxEPTSIZ_MC_SHIFT (29)
+#define S3C_DxEPTSIZ_MC_LIMIT (0x3)
+#define S3C_DxEPTSIZ_MC(_x) ((_x) << 29)
+
+#define S3C_DxEPTSIZ_PktCnt_MASK (0x3ff << 19)
+#define S3C_DxEPTSIZ_PktCnt_SHIFT (19)
+#define S3C_DxEPTSIZ_PktCnt_GET(_v) (((_v) >> 19) & 0x3ff)
+#define S3C_DxEPTSIZ_PktCnt_LIMIT (0x3ff)
+#define S3C_DxEPTSIZ_PktCnt(_x) ((_x) << 19)
+
+#define S3C_DxEPTSIZ_XferSize_MASK (0x7ffff << 0)
+#define S3C_DxEPTSIZ_XferSize_SHIFT (0)
+#define S3C_DxEPTSIZ_XferSize_GET(_v) (((_v) >> 0) & 0x7ffff)
+#define S3C_DxEPTSIZ_XferSize_LIMIT (0x7ffff)
+#define S3C_DxEPTSIZ_XferSize(_x) ((_x) << 0)
+
+
+#define S3C_DIEPDMA(_a) S3C_HSOTG_REG(0x914 + ((_a) * 0x20))
+#define S3C_DOEPDMA(_a) S3C_HSOTG_REG(0xB14 + ((_a) * 0x20))
+
+#define S3C_EPFIFO(_a) S3C_HSOTG_REG(0x1000 + ((_a) * 0x1000))
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_H */
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index a60cfe75791..8ea0d942cde 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -6,59 +6,65 @@
mainmenu "Blackfin Kernel Configuration"
config MMU
- bool
- default n
+ def_bool n
config FPU
- bool
- default n
+ def_bool n
config RWSEM_GENERIC_SPINLOCK
- bool
- default y
+ def_bool y
config RWSEM_XCHGADD_ALGORITHM
- bool
- default n
+ def_bool n
config BLACKFIN
- bool
- default y
+ def_bool y
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select HAVE_IDE
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_LZMA
select HAVE_OPROFILE
select ARCH_WANT_OPTIONAL_GPIOLIB
+config GENERIC_BUG
+ def_bool y
+ depends on BUG
+
config ZONE_DMA
- bool
- default y
+ def_bool y
config GENERIC_FIND_NEXT_BIT
- bool
- default y
+ def_bool y
config GENERIC_HWEIGHT
- bool
- default y
+ def_bool y
config GENERIC_HARDIRQS
- bool
- default y
+ def_bool y
config GENERIC_IRQ_PROBE
- bool
- default y
+ def_bool y
config GENERIC_GPIO
- bool
- default y
+ def_bool y
config FORCE_MAX_ZONEORDER
int
default "14"
config GENERIC_CALIBRATE_DELAY
- bool
- default y
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
source "init/Kconfig"
@@ -408,12 +414,12 @@ comment "Clock/PLL Setup"
config CLKIN_HZ
int "Frequency of the crystal on the board in Hz"
+ default "10000000" if BFIN532_IP0X
default "11059200" if BFIN533_STAMP
+ default "24576000" if PNAV10
+ default "25000000" # most people use this
default "27000000" if BFIN533_EZKIT
- default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN538_EZKIT || BFIN518F-EZBRD)
default "30000000" if BFIN561_EZKIT
- default "24576000" if PNAV10
- default "10000000" if BFIN532_IP0X
help
The frequency of CLKIN crystal oscillator on the board in Hz.
Warning: This value should match the crystal on the board. Otherwise,
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d54c8283825..6f9533c3d75 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -137,7 +137,7 @@ archclean:
INSTALL_PATH ?= /tftpboot
boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage
+BOOT_TARGETS = vmImage vmImage.bz2 vmImage.gz vmImage.lzma
PHONY += $(BOOT_TARGETS) install
KBUILD_IMAGE := $(boot)/vmImage
@@ -150,7 +150,10 @@ install:
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
define archhelp
- echo '* vmImage - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage)'
+ echo '* vmImage - Alias to selected kernel format (vmImage.gz by default)'
+ echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)'
+ echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)'
+ echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)'
echo ' install - Install kernel using'
echo ' (your) ~/bin/$(CROSS_COMPILE)installkernel or'
echo ' (distribution) PATH: $(CROSS_COMPILE)installkernel or'
diff --git a/arch/blackfin/boot/.gitignore b/arch/blackfin/boot/.gitignore
index 3ae03994b88..229e5080867 100644
--- a/arch/blackfin/boot/.gitignore
+++ b/arch/blackfin/boot/.gitignore
@@ -1 +1,2 @@
-+vmImage
+vmImage*
+vmlinux*
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index e028d13481a..3ab6f23561d 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -8,24 +8,41 @@
MKIMAGE := $(srctree)/scripts/mkuboot.sh
-targets := vmImage
-extra-y += vmlinux.bin vmlinux.gz
+targets := vmImage vmImage.bz2 vmImage.gz vmImage.lzma
+extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
- -C gzip -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
+ -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
-e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
-d $< $@
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/vmImage: $(obj)/vmlinux.gz
- $(call if_changed,uimage)
- @$(kecho) 'Kernel: $@ is ready'
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,bzip2)
+
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma)
+
+$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
+ $(call if_changed,uimage,bzip2)
+
+$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
+ $(call if_changed,uimage,gzip)
+
+$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
+ $(call if_changed,uimage,lzma)
+
+suffix-$(CONFIG_KERNEL_GZIP) := gz
+suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZMA) := lzma
+$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
+ @ln -sf $(notdir $<) $@
install:
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 7bbf44e4ddf..b1d92f13ef9 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -90,7 +90,7 @@ static inline int atomic_test_mask(int mask, atomic_t *v)
static inline void atomic_add(int i, atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter += i;
@@ -99,7 +99,7 @@ static inline void atomic_add(int i, atomic_t *v)
static inline void atomic_sub(int i, atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter -= i;
@@ -110,7 +110,7 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t *v)
{
int __temp = 0;
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter += i;
@@ -124,7 +124,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
int __temp = 0;
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter -= i;
@@ -136,7 +136,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
static inline void atomic_inc(volatile atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter++;
@@ -145,7 +145,7 @@ static inline void atomic_inc(volatile atomic_t *v)
static inline void atomic_dec(volatile atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter--;
@@ -154,7 +154,7 @@ static inline void atomic_dec(volatile atomic_t *v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter &= ~mask;
@@ -163,7 +163,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter |= mask;
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index daffc0684e7..e39277ea43e 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -31,7 +31,7 @@
#ifndef __ASSEMBLY__
-#include <asm-generic/sections.h>
+#include <asm/sections.h>
#include <asm/ptrace.h>
#include <asm/user.h>
#include <linux/linkage.h>
@@ -99,15 +99,6 @@ extern const char bfin_board_name[];
extern unsigned long bfin_sic_iwr[];
extern unsigned vr_wakeup;
extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */
-extern unsigned long _ramstart, _ramend, _rambase;
-extern unsigned long memory_start, memory_end, physical_mem_end;
-extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
- _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
- _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
- _ebss_l2[], _l2_lma_start[];
-
-/* only used when MTD_UCLINUX */
-extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
#ifdef CONFIG_BFIN_ICACHE_LOCK
extern void cache_grab_lock(int way);
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 21b036eadab..75fee2f7d9f 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -109,7 +109,8 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(int nr, volatile unsigned long *addr)
{
- int mask, flags;
+ int mask;
+ unsigned long flags;
unsigned long *ADDR = (unsigned long *)addr;
ADDR += nr >> 5;
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
index 6d3e11b1fc5..655e49540e4 100644
--- a/arch/blackfin/include/asm/bug.h
+++ b/arch/blackfin/include/asm/bug.h
@@ -2,13 +2,58 @@
#define _BLACKFIN_BUG_H
#ifdef CONFIG_BUG
-#define HAVE_ARCH_BUG
-#define BUG() do { \
- dump_bfin_trace_buffer(); \
- printk(KERN_EMERG "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
- panic("BUG!"); \
-} while (0)
+#define BFIN_BUG_OPCODE 0xefcd
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+ " .section __bug_table,\"a\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .long %1\n" \
+ " .short %2\n" \
+ " .short %3\n" \
+ " .org 2b + %4\n" \
+ " .previous" \
+ : \
+ : "i"(BFIN_BUG_OPCODE), "i"(__FILE__), \
+ "i"(__LINE__), "i"(flags), \
+ "i"(sizeof(struct bug_entry)))
+
+#else
+
+#define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+ " .section __bug_table,\"a\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .short %1\n" \
+ " .org 2b + %2\n" \
+ " .previous" \
+ : \
+ : "i"(BFIN_BUG_OPCODE), "i"(flags), \
+ "i"(sizeof(struct bug_entry)))
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define BUG() \
+ do { \
+ _BUG_OR_WARN(0); \
+ for (;;); \
+ } while (0)
+
+#define WARN_ON(condition) \
+ ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ _BUG_OR_WARN(BUGFLAG_WARNING); \
+ unlikely(__ret_warn_on); \
+ })
+
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_WARN_ON
#endif
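The _BUG_OR_WARN() macro above plants a trapping opcode and records its location in the __bug_table section; the generic BUG machinery then maps a faulting PC back to that record. As a rough illustration only (not part of this patch; a user-space mock with invented names, mirroring the verbose bug_entry layout), the lookup boils down to:

/* Illustration only: how a trap handler can map a faulting PC back to a
 * bug-table entry such as the ones _BUG_OR_WARN() emits above.  This is
 * a standalone mock, not the kernel's lib/bug.c. */
#include <stddef.h>
#include <stdio.h>

struct demo_bug_entry {
	unsigned long bug_addr;	/* address of the trapping .hword */
	const char *file;	/* __FILE__ recorded by the macro */
	unsigned short line;	/* __LINE__ recorded by the macro */
	unsigned short flags;	/* 0 for BUG(), BUGFLAG_WARNING for WARN_ON() */
};

static const struct demo_bug_entry demo_bug_table[] = {
	{ 0x1000, "drivers/foo.c", 42, 0 },
};

static const struct demo_bug_entry *demo_find_bug(unsigned long pc)
{
	size_t i;

	for (i = 0; i < sizeof(demo_bug_table) / sizeof(demo_bug_table[0]); i++)
		if (demo_bug_table[i].bug_addr == pc)
			return &demo_bug_table[i];
	return NULL;	/* not a BUG/WARN site */
}

int main(void)
{
	const struct demo_bug_entry *b = demo_find_bug(0x1000);

	if (b)
		printf("kernel BUG at %s:%u\n", b->file, b->line);
	return 0;
}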
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 86637814cf2..2ef669ed922 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -34,9 +34,13 @@
#define L1_CACHE_SHIFT_MAX 5
#if defined(CONFIG_SMP) && \
- !defined(CONFIG_BFIN_CACHE_COHERENT) && \
- defined(CONFIG_BFIN_DCACHE)
-#define __ARCH_SYNC_CORE_DCACHE
+ !defined(CONFIG_BFIN_CACHE_COHERENT)
+# if defined(CONFIG_BFIN_ICACHE)
+# define __ARCH_SYNC_CORE_ICACHE
+# endif
+# if defined(CONFIG_BFIN_DCACHE)
+# define __ARCH_SYNC_CORE_DCACHE
+# endif
#ifndef __ASSEMBLY__
asmlinkage void __raw_smp_mark_barrier_asm(void);
asmlinkage void __raw_smp_check_barrier_asm(void);
@@ -51,6 +55,7 @@ static inline void smp_check_barrier(void)
}
void resync_core_dcache(void);
+void resync_core_icache(void);
#endif
#endif
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 94697f0f6f4..5c17dee53b5 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -37,6 +37,7 @@ extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned lo
extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dflush_page(void *page);
extern void blackfin_invalidate_entire_dcache(void);
+extern void blackfin_invalidate_entire_icache(void);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
@@ -97,7 +98,7 @@ do { memcpy(dst, src, len); \
extern unsigned long reserved_mem_dcache_on;
extern unsigned long reserved_mem_icache_on;
-static inline int bfin_addr_dcachable(unsigned long addr)
+static inline int bfin_addr_dcacheable(unsigned long addr)
{
#ifdef CONFIG_BFIN_DCACHE
if (addr < (_ramend - DMA_UNCACHED_REGION))
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
index c2594ef877f..565b8136855 100644
--- a/arch/blackfin/include/asm/cpu.h
+++ b/arch/blackfin/include/asm/cpu.h
@@ -34,6 +34,7 @@ struct blackfin_cpudata {
unsigned int dmemctl;
unsigned long loops_per_jiffy;
unsigned long dcache_invld_count;
+ unsigned long icache_invld_count;
};
DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
index 40a8c178f10..8643680f0f7 100644
--- a/arch/blackfin/include/asm/ftrace.h
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -1 +1,13 @@
-/* empty */
+/*
+ * Blackfin ftrace code
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_FTRACE_H__
+#define __ASM_BFIN_FTRACE_H__
+
+#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call: LINK + CALL */
+
+#endif
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 51d0bf5e289..bbe1c3726b6 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,10 +35,10 @@
#include <asm/atomic.h>
#include <asm/traps.h>
-#define IPIPE_ARCH_STRING "1.9-01"
+#define IPIPE_ARCH_STRING "1.10-00"
#define IPIPE_MAJOR_NUMBER 1
-#define IPIPE_MINOR_NUMBER 9
-#define IPIPE_PATCH_NUMBER 1
+#define IPIPE_MINOR_NUMBER 10
+#define IPIPE_PATCH_NUMBER 0
#ifdef CONFIG_SMP
#error "I-pipe/blackfin: SMP not implemented"
@@ -54,10 +54,11 @@ do { \
#define task_hijacked(p) \
({ \
- int __x__ = ipipe_current_domain != ipipe_root_domain; \
- /* We would need to clear the SYNC flag for the root domain */ \
- /* over the current processor in SMP mode. */ \
- local_irq_enable_hw(); __x__; \
+ int __x__ = __ipipe_root_domain_p; \
+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \
+ if (__x__) \
+ local_irq_enable_hw(); \
+ !__x__; \
})
struct ipipe_domain;
@@ -179,23 +180,24 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
#define __ipipe_run_isr(ipd, irq) \
do { \
- if (ipd == ipipe_root_domain) { \
+ if (!__ipipe_pipeline_head_p(ipd)) \
local_irq_enable_hw(); \
- if (ipipe_virtual_irq_p(irq)) \
+ if (ipd == ipipe_root_domain) { \
+ if (unlikely(ipipe_virtual_irq_p(irq))) { \
+ irq_enter(); \
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
- else \
+ irq_exit(); \
+ } else \
ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
- local_irq_disable_hw(); \
} else { \
__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
- local_irq_enable_nohead(ipd); \
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
/* Attempt to exit the outer interrupt level before \
* starting the deferred IRQ processing. */ \
- local_irq_disable_nohead(ipd); \
__ipipe_run_irqtail(); \
__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
} \
+ local_irq_disable_hw(); \
} while (0)
#define __ipipe_syscall_watched_p(p, sc) \
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 7645e85a5f6..400bdd52ce8 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -17,270 +17,17 @@
#ifndef _BFIN_IRQ_H_
#define _BFIN_IRQ_H_
-/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/
-#include <mach/irq.h>
-#include <asm/pda.h>
-#include <asm/processor.h>
-
-#ifdef CONFIG_SMP
-/* Forward decl needed due to cdef inter dependencies */
-static inline uint32_t __pure bfin_dspid(void);
-# define blackfin_core_id() (bfin_dspid() & 0xff)
-# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
-#else
-extern unsigned long bfin_irq_flags;
-#endif
-
-#ifdef CONFIG_IPIPE
-
-#include <linux/ipipe_trace.h>
+#include <linux/irqflags.h>
-void __ipipe_unstall_root(void);
-
-void __ipipe_restore_root(unsigned long flags);
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __all_masked_irq_flags 0x3f
-# define __save_and_cli_hw(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %1;" \
- : "=&d"(x) \
- : "d" (0x3F) \
- )
-#else
-# define __all_masked_irq_flags 0x1f
-# define __save_and_cli_hw(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- : "=&d"(x) \
- )
-#endif
-
-#define irqs_enabled_from_flags_hw(x) ((x) != __all_masked_irq_flags)
-#define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags))
-#define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x)
-
-#define local_save_flags(x) \
- do { \
- (x) = __ipipe_test_root() ? \
- __all_masked_irq_flags : bfin_irq_flags; \
- barrier(); \
- } while (0)
-
-#define local_irq_save(x) \
- do { \
- (x) = __ipipe_test_and_stall_root() ? \
- __all_masked_irq_flags : bfin_irq_flags; \
- barrier(); \
- } while (0)
-
-static inline void local_irq_restore(unsigned long x)
-{
- barrier();
- __ipipe_restore_root(x == __all_masked_irq_flags);
-}
-
-#define local_irq_disable() \
- do { \
- __ipipe_stall_root(); \
- barrier(); \
- } while (0)
-
-static inline void local_irq_enable(void)
-{
- barrier();
- __ipipe_unstall_root();
-}
-
-#define irqs_disabled() __ipipe_test_root()
-
-#define local_save_flags_hw(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %0;" \
- : "=d"(x) \
- )
-
-#define irqs_disabled_hw() \
- ({ \
- unsigned long flags; \
- local_save_flags_hw(flags); \
- !irqs_enabled_from_flags_hw(flags); \
- })
-
-static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
-{
- /* Merge virtual and real interrupt mask bits into a single
- 32bit word. */
- return (real & ~(1 << 31)) | ((virt != 0) << 31);
-}
-
-static inline int raw_demangle_irq_bits(unsigned long *x)
-{
- int virt = (*x & (1 << 31)) != 0;
- *x &= ~(1L << 31);
- return virt;
-}
-
-#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
-
-#define local_irq_disable_hw() \
- do { \
- int _tmp_dummy; \
- if (!irqs_disabled_hw()) \
- ipipe_trace_begin(0x80000000); \
- __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
- } while (0)
-
-#define local_irq_enable_hw() \
- do { \
- if (irqs_disabled_hw()) \
- ipipe_trace_end(0x80000000); \
- __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags)); \
- } while (0)
-
-#define local_irq_save_hw(x) \
- do { \
- __save_and_cli_hw(x); \
- if (local_test_iflag_hw(x)) \
- ipipe_trace_begin(0x80000001); \
- } while (0)
-
-#define local_irq_restore_hw(x) \
- do { \
- if (local_test_iflag_hw(x)) { \
- ipipe_trace_end(0x80000001); \
- local_irq_enable_hw_notrace(); \
- } \
- } while (0)
-
-#define local_irq_disable_hw_notrace() \
- do { \
- int _tmp_dummy; \
- __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
- } while (0)
-
-#define local_irq_enable_hw_notrace() \
- __asm__ __volatile__( \
- "sti %0;" \
- : \
- : "d"(bfin_irq_flags) \
- )
-
-#define local_irq_save_hw_notrace(x) __save_and_cli_hw(x)
-
-#define local_irq_restore_hw_notrace(x) \
- do { \
- if (local_test_iflag_hw(x)) \
- local_irq_enable_hw_notrace(); \
- } while (0)
-
-#else /* CONFIG_IPIPE_TRACE_IRQSOFF */
-
-#define local_irq_enable_hw() \
- __asm__ __volatile__( \
- "sti %0;" \
- : \
- : "d"(bfin_irq_flags) \
- )
-
-#define local_irq_disable_hw() \
- do { \
- int _tmp_dummy; \
- __asm__ __volatile__ ( \
- "cli %0;" \
- : "=d" (_tmp_dummy)); \
- } while (0)
-
-#define local_irq_restore_hw(x) \
- do { \
- if (irqs_enabled_from_flags_hw(x)) \
- local_irq_enable_hw(); \
- } while (0)
-
-#define local_irq_save_hw(x) __save_and_cli_hw(x)
-
-#define local_irq_disable_hw_notrace() local_irq_disable_hw()
-#define local_irq_enable_hw_notrace() local_irq_enable_hw()
-#define local_irq_save_hw_notrace(x) local_irq_save_hw(x)
-#define local_irq_restore_hw_notrace(x) local_irq_restore_hw(x)
-
-#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
-
-#else /* !CONFIG_IPIPE */
-
-/*
- * Interrupt configuring macros.
- */
-#define local_irq_disable() \
- do { \
- int __tmp_dummy; \
- __asm__ __volatile__( \
- "cli %0;" \
- : "=d" (__tmp_dummy) \
- ); \
- } while (0)
-
-#define local_irq_enable() \
- __asm__ __volatile__( \
- "sti %0;" \
- : \
- : "d" (bfin_irq_flags) \
- )
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __save_and_cli(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %1;" \
- : "=&d" (x) \
- : "d" (0x3F) \
- )
-#else
-# define __save_and_cli(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- : "=&d" (x) \
- )
-#endif
-
-#define local_save_flags(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %0;" \
- : "=d" (x) \
- )
-
-#ifdef CONFIG_DEBUG_HWERR
-#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
-#else
-#define irqs_enabled_from_flags(x) ((x) != 0x1f)
-#endif
-
-#define local_irq_restore(x) \
- do { \
- if (irqs_enabled_from_flags(x)) \
- local_irq_enable(); \
- } while (0)
-
-/* For spinlocks etc */
-#define local_irq_save(x) __save_and_cli(x)
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- !irqs_enabled_from_flags(flags); \
-})
-
-#define local_irq_save_hw(x) local_irq_save(x)
-#define local_irq_restore_hw(x) local_irq_restore(x)
-#define local_irq_enable_hw() local_irq_enable()
-#define local_irq_disable_hw() local_irq_disable()
-#define irqs_disabled_hw() irqs_disabled()
+/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
+#include <mach/irq.h>
-#endif /* !CONFIG_IPIPE */
+/* Xenomai IPIPE helpers */
+#define local_irq_restore_hw(x) local_irq_restore(x)
+#define local_irq_save_hw(x) local_irq_save(x)
+#define local_irq_enable_hw(x) local_irq_enable(x)
+#define local_irq_disable_hw(x) local_irq_disable(x)
+#define irqs_disabled_hw(x) irqs_disabled(x)
#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
new file mode 100644
index 00000000000..139cba4651b
--- /dev/null
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -0,0 +1,63 @@
+/*
+ * interface to Blackfin CEC
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_IRQFLAGS_H__
+#define __ASM_BFIN_IRQFLAGS_H__
+
+#ifdef CONFIG_SMP
+# include <asm/pda.h>
+# include <asm/processor.h>
+/* Forward decl needed due to cdef inter dependencies */
+static inline uint32_t __pure bfin_dspid(void);
+# define blackfin_core_id() (bfin_dspid() & 0xff)
+# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
+#else
+extern unsigned long bfin_irq_flags;
+#endif
+
+static inline void bfin_sti(unsigned long flags)
+{
+ asm volatile("sti %0;" : : "d" (flags));
+}
+
+static inline unsigned long bfin_cli(void)
+{
+ unsigned long flags;
+ asm volatile("cli %0;" : "=d" (flags));
+ return flags;
+}
+
+static inline void raw_local_irq_disable(void)
+{
+ bfin_cli();
+}
+static inline void raw_local_irq_enable(void)
+{
+ bfin_sti(bfin_irq_flags);
+}
+
+#define raw_local_save_flags(flags) do { (flags) = bfin_read_IMASK(); } while (0)
+
+#define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+ if (!raw_irqs_disabled_flags(flags))
+ raw_local_irq_enable();
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+ unsigned long flags = bfin_cli();
+#ifdef CONFIG_DEBUG_HWERR
+ bfin_sti(0x3f);
+#endif
+ return flags;
+}
+#define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0)
+
+#endif
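For orientation, a standalone mock (names prefixed with mock_ are invented, no real MMR is touched) of the save/disable/restore discipline the helpers in this new irqflags.h support; the IMASK register is replaced by a plain variable, so this sketches the pattern rather than Blackfin behaviour:

/* Mock of the local_irq_save()/local_irq_restore() pattern built on the
 * helpers above.  The hardware IMASK is replaced by a plain variable. */
#include <stdio.h>

static unsigned long mock_imask = 0x1f;		/* pretend IRQs start enabled */

static unsigned long mock_irq_save(void)
{
	unsigned long flags = mock_imask;	/* like bfin_cli(): read the mask... */
	mock_imask = 0;				/* ...and disable everything */
	return flags;
}

static void mock_irq_restore(unsigned long flags)
{
	if (flags)				/* only re-enable if IRQs were on */
		mock_imask = flags;
}

int main(void)
{
	unsigned long flags = mock_irq_save();

	printf("in critical section, imask=%#lx\n", mock_imask);
	mock_irq_restore(flags);
	printf("after restore, imask=%#lx\n", mock_imask);
	return 0;
}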
diff --git a/arch/blackfin/include/asm/mutex-dec.h b/arch/blackfin/include/asm/mutex-dec.h
deleted file mode 100644
index 0134151656a..00000000000
--- a/arch/blackfin/include/asm/mutex-dec.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * include/asm-generic/mutex-dec.h
- *
- * Generic implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- */
-#ifndef _ASM_GENERIC_MUTEX_DEC_H
-#define _ASM_GENERIC_MUTEX_DEC_H
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- fail_fn(count);
- else
- smp_mb();
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
- else {
- smp_mb();
- return 0;
- }
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the count from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, then the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
-{
- smp_mb();
- if (unlikely(atomic_inc_return(count) <= 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- /*
- * We have two variants here. The cmpxchg based one is the best one
- * because it never induce a false contention state. It is included
- * here because architectures using the inc/dec algorithms over the
- * xchg ones are much more likely to support cmpxchg natively.
- *
- * If not we fall back to the spinlock based variant - that is
- * just as efficient (and simpler) as a 'destructive' probing of
- * the mutex state would be.
- */
-#ifdef __HAVE_ARCH_CMPXCHG
- if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
- smp_mb();
- return 1;
- }
- return 0;
-#else
- return fail_fn(count);
-#endif
-}
-
-#endif
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 1443c3353a8..e7fd0ecd73f 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -4,4 +4,15 @@
/* nothing to see, move along */
#include <asm-generic/sections.h>
+/* only used when MTD_UCLINUX */
+extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
+
+extern unsigned long _ramstart, _ramend, _rambase;
+extern unsigned long memory_start, memory_end, physical_mem_end;
+
+extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
+ _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
+ _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
+ _ebss_l2[], _l2_lma_start[];
+
#endif
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index a4c8254bec5..294dbda2416 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -35,10 +35,10 @@
#define _BLACKFIN_SYSTEM_H
#include <linux/linkage.h>
-#include <linux/compiler.h>
+#include <linux/irqflags.h>
#include <mach/anomaly.h>
+#include <asm/cache.h>
#include <asm/pda.h>
-#include <asm/processor.h>
#include <asm/irq.h>
/*
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index cf5066d3efd..da35133c171 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -380,8 +380,9 @@
#define __NR_inotify_init1 365
#define __NR_preadv 366
#define __NR_pwritev 367
+#define __NR_rt_tgsigqueueinfo 368
-#define __NR_syscall 368
+#define __NR_syscall 369
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index fd4d4328a0f..3731088e181 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -15,6 +15,10 @@ else
obj-y += time.o
endif
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+CFLAGS_REMOVE_ftrace.o = -pg
+
obj-$(CONFIG_IPIPE) += ipipe.o
obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
@@ -23,6 +27,7 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
# the kgdb test puts code into L2 and without linker
# relaxation, we need to force long calls to/from it
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 763ed84ba45..e0bf8cc0690 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -453,10 +453,10 @@ void *dma_memcpy(void *pdst, const void *psrc, size_t size)
unsigned long src = (unsigned long)psrc;
size_t bulk, rest;
- if (bfin_addr_dcachable(src))
+ if (bfin_addr_dcacheable(src))
blackfin_dcache_flush_range(src, src + size);
- if (bfin_addr_dcachable(dst))
+ if (bfin_addr_dcacheable(dst))
blackfin_dcache_invalidate_range(dst, dst + size);
bulk = size & ~0xffff;
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 53e893ff708..aa05e638fb7 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -103,3 +103,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
#endif
#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 87463ce87f5..784923e52a9 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -151,7 +151,7 @@ static noinline int dcplb_miss(unsigned int cpu)
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
- if (bfin_addr_dcachable(addr)) {
+ if (bfin_addr_dcacheable(addr)) {
d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#ifdef CONFIG_BFIN_WT
d_data |= CPLB_L1_AOW | CPLB_WT;
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
index 8cbb47c7b66..12b030842fd 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -28,6 +28,7 @@
#include <asm/cplbinit.h>
#include <asm/cplb.h>
#include <asm/mmu_context.h>
+#include <asm/traps.h>
/*
* WARNING
@@ -100,28 +101,6 @@ static inline void write_icplb_data(int cpu, int idx, unsigned long data,
#endif
}
-/*
- * Given the contents of the status register, return the index of the
- * CPLB that caused the fault.
- */
-static inline int faulting_cplb_index(int status)
-{
- int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
- return 30 - signbits;
-}
-
-/*
- * Given the contents of the status register and the DCPLB_DATA contents,
- * return true if a write access should be permitted.
- */
-static inline int write_permitted(int status, unsigned long data)
-{
- if (status & FAULT_USERSUPV)
- return !!(data & CPLB_SUPV_WR);
- else
- return !!(data & CPLB_USER_WR);
-}
-
/* Counters to implement round-robin replacement. */
static int icplb_rr_index[NR_CPUS] PDT_ATTR;
static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
@@ -245,43 +224,16 @@ MGR_ATTR static int dcplb_miss(int cpu)
return CPLB_RELOADED;
}
-MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
-{
- int status = bfin_read_DCPLB_STATUS();
-
- nr_dcplb_prot[cpu]++;
-
- if (likely(status & FAULT_RW)) {
- int idx = faulting_cplb_index(status);
- unsigned long regaddr = DCPLB_DATA0 + idx * 4;
- unsigned long data = bfin_read32(regaddr);
-
- /* Check if fault is to dirty a clean page */
- if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
- write_permitted(status, data)) {
-
- dcplb_tbl[cpu][idx].data = data;
- bfin_write32(regaddr, data);
- return CPLB_RELOADED;
- }
- }
-
- return CPLB_PROT_VIOL;
-}
-
MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
int cause = seqstat & 0x3f;
unsigned int cpu = smp_processor_id();
switch (cause) {
- case 0x2C:
+ case VEC_CPLB_I_M:
return icplb_miss(cpu);
- case 0x26:
+ case VEC_CPLB_M:
return dcplb_miss(cpu);
default:
- if (unlikely(cause == 0x23))
- return dcplb_protection_fault(cpu);
-
return CPLB_UNKNOWN_ERR;
}
}
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 3302719173c..2ab56811841 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -202,11 +202,15 @@ asmlinkage void __init init_early_exception_vectors(void)
asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr)
{
/* This can happen before the uart is initialized, so initialize
- * the UART now
+ * the UART now (but only if we are running on the processor we think
+ * we are compiled for - otherwise we write to MMRs that don't exist
+ * and cause other problems).  Nothing comes out of the UART, but it
+ * does end up in the __log_buf.
*/
- if (likely(early_console == NULL))
+ if (likely(early_console == NULL) && CPUID == bfin_cpuid())
setup_early_printk(DEFAULT_EARLY_PORT);
+ printk(KERN_EMERG "Early panic\n");
dump_bfin_mem(fp);
show_regs(fp);
dump_bfin_trace_buffer();
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
new file mode 100644
index 00000000000..6980b7a0615
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -0,0 +1,140 @@
+/*
+ * mcount and friends -- ftrace stuff
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+.text
+
+/* GCC will have called us before setting up the function prologue, so we
+ * can clobber the normal scratch registers, but we need to make sure to
+ * save/restore the registers used for argument passing (R0-R2) in case
+ * the profiled function is using them. With data registers, R3 is the
+ * only one we can blow away. With pointer registers, we have P0-P2.
+ *
+ * Upon entry, the RETS will point to the top of the current profiled
+ * function. And since GCC set up the frame for us, the previous function
+ * will be waiting there. mmmm pie.
+ */
+ENTRY(__mcount)
+ /* save third function arg early so we can do testing below */
+ [--sp] = r2;
+
+ /* load the function pointer to the tracer */
+ p0.l = _ftrace_trace_function;
+ p0.h = _ftrace_trace_function;
+ r3 = [p0];
+
+ /* optional micro optimization: don't call the stub tracer */
+ r2.l = _ftrace_stub;
+ r2.h = _ftrace_stub;
+ cc = r2 == r3;
+ if ! cc jump .Ldo_trace;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* if the ftrace_graph_return function pointer is not set to
+ * the ftrace_stub entry, call prepare_ftrace_return().
+ */
+ p0.l = _ftrace_graph_return;
+ p0.h = _ftrace_graph_return;
+ r3 = [p0];
+ cc = r2 == r3;
+ if ! cc jump _ftrace_graph_caller;
+
+ /* similarly, if the ftrace_graph_entry function pointer is not
+ * set to the ftrace_graph_entry_stub entry, ...
+ */
+ p0.l = _ftrace_graph_entry;
+ p0.h = _ftrace_graph_entry;
+ r2.l = _ftrace_graph_entry_stub;
+ r2.h = _ftrace_graph_entry_stub;
+ r3 = [p0];
+ cc = r2 == r3;
+ if ! cc jump _ftrace_graph_caller;
+#endif
+
+ r2 = [sp++];
+ rts;
+
+.Ldo_trace:
+
+ /* save first/second function arg and the return register */
+ [--sp] = r0;
+ [--sp] = r1;
+ [--sp] = rets;
+
+ /* setup the tracer function */
+ p0 = r3;
+
+ /* tracer(ulong frompc, ulong selfpc):
+ * frompc: the pc that did the call to ...
+ * selfpc: ... this location
+ * the selfpc itself will need adjusting for the mcount call
+ */
+ r1 = rets;
+ r0 = [fp + 4];
+ r1 += -MCOUNT_INSN_SIZE;
+
+ /* call the tracer */
+ call (p0);
+
+ /* restore state and get out of dodge */
+.Lfinish_trace:
+ rets = [sp++];
+ r1 = [sp++];
+ r0 = [sp++];
+ r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+ rts;
+ENDPROC(__mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* The prepare_ftrace_return() function is similar to the trace function
+ * except it takes a pointer to the location of the frompc. This is so
+ * prepare_ftrace_return() can hijack it temporarily for probing
+ * purposes.
+ */
+ENTRY(_ftrace_graph_caller)
+ /* save first/second function arg and the return register */
+ [--sp] = r0;
+ [--sp] = r1;
+ [--sp] = rets;
+
+ r0 = fp;
+ r1 = rets;
+ r0 += 4;
+ r1 += -MCOUNT_INSN_SIZE;
+ call _prepare_ftrace_return;
+
+ jump .Lfinish_trace;
+ENDPROC(_ftrace_graph_caller)
+
+/* Undo the rewrite caused by ftrace_graph_caller(). The common function
+ * ftrace_return_to_handler() will return the original rets so we can
+ * restore it and be on our way.
+ */
+ENTRY(_return_to_handler)
+ /* make sure original return values are saved */
+ [--sp] = p0;
+ [--sp] = r0;
+ [--sp] = r1;
+
+ /* get original return address */
+ call _ftrace_return_to_handler;
+ rets = r0;
+
+ /* anomaly 05000371 - make sure we have at least three instructions
+ * between rets setting and the return
+ */
+ r1 = [sp++];
+ r0 = [sp++];
+ p0 = [sp++];
+ rts;
+ENDPROC(_return_to_handler)
+#endif
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
new file mode 100644
index 00000000000..905bfc40a00
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace.c
@@ -0,0 +1,42 @@
+/*
+ * ftrace graph code
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY)
+ return;
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ return;
+ }
+
+ /* all is well in the world ! hijack RETS ... */
+ *parent = return_hooker;
+}
+
+#endif
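As a sketch of the round trip that prepare_ftrace_return() and _return_to_handler implement together (a user-space model with a single return slot instead of the per-task ret_stack, and invented names): the caller's return address is parked, the return is diverted into a hook, and the hook hands the original address back.

/* Toy model of the graph-tracer hijack: park the real return address,
 * divert the return into a hook, then give the original back. */
#include <stdio.h>

typedef void (*retfn_t)(void);

static retfn_t saved_ret;		/* stand-in for the per-task ret_stack slot */

static void real_caller_continuation(void)
{
	printf("back in the original caller\n");
}

static void demo_return_hook(void)
{
	/* mirrors _return_to_handler: fetch the parked address and go there */
	retfn_t orig = saved_ret;

	printf("tracer exit hook runs first\n");
	orig();
}

static void demo_prepare_return(retfn_t *parent)
{
	/* mirrors prepare_ftrace_return(): park the real address, hijack it */
	saved_ret = *parent;
	*parent = demo_return_hook;
}

int main(void)
{
	retfn_t ret = real_caller_continuation;	/* what RETS would hold */

	demo_prepare_return(&ret);
	ret();					/* the "return" now lands in the hook */
	return 0;
}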
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 5fc424803a1..d8cde1fc5cb 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -99,7 +99,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
* interrupt.
*/
m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
- this_domain = ipipe_current_domain;
+ this_domain = __ipipe_current_domain;
if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
head = &this_domain->p_link;
@@ -212,7 +212,9 @@ void __ipipe_unstall_root_raw(void)
int __ipipe_syscall_root(struct pt_regs *regs)
{
+ struct ipipe_percpu_domain_data *p;
unsigned long flags;
+ int ret;
/*
* We need to run the IRQ tail hook whenever we don't
@@ -231,29 +233,31 @@ int __ipipe_syscall_root(struct pt_regs *regs)
/*
* This routine either returns:
* 0 -- if the syscall is to be passed to Linux;
- * 1 -- if the syscall should not be passed to Linux, and no
+ * >0 -- if the syscall should not be passed to Linux, and no
* tail work should be performed;
- * -1 -- if the syscall should not be passed to Linux but the
+ * <0 -- if the syscall should not be passed to Linux but the
* tail work has to be performed (for handling signals etc).
*/
- if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
- __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
- if (ipipe_root_domain_p && !in_atomic()) {
- /*
- * Sync pending VIRQs before _TIF_NEED_RESCHED
- * is tested.
- */
- local_irq_save_hw(flags);
- if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
- __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
- local_irq_restore_hw(flags);
- return -1;
- }
+ if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+ return 0;
+
+ ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
+
+ local_irq_save_hw(flags);
+
+ if (!__ipipe_root_domain_p) {
+ local_irq_restore_hw(flags);
return 1;
}
- return 0;
+ p = ipipe_root_cpudom_ptr();
+ if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
+ __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+
+ local_irq_restore_hw(flags);
+
+ return -ret;
}
unsigned long ipipe_critical_enter(void (*syncfn) (void))
@@ -329,9 +333,7 @@ asmlinkage void __ipipe_sync_root(void)
void ___ipipe_sync_pipeline(unsigned long syncmask)
{
- struct ipipe_domain *ipd = ipipe_current_domain;
-
- if (ipd == ipipe_root_domain) {
+ if (__ipipe_root_domain_p) {
if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
return;
}
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 80447f99c2b..6454babdfaf 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -1098,7 +1098,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
CPUID, bfin_cpuid());
seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
- "stepping\t: %d\n",
+ "stepping\t: %d ",
cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
"mpu on",
@@ -1107,7 +1107,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#endif
revid);
- seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
+ if (bfin_revid() != bfin_compiled_revid()) {
+ if (bfin_compiled_revid() == -1)
+ seq_printf(m, "(Compiled for Rev none)");
+ else if (bfin_compiled_revid() == 0xffff)
+ seq_printf(m, "(Compiled for Rev any)");
+ else
+ seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
+ }
+
+ seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
cclk/1000000, cclk%1000000,
sclk/1000000, sclk%1000000);
seq_printf(m, "bogomips\t: %lu.%02lu\n"
@@ -1172,6 +1181,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef __ARCH_SYNC_CORE_DCACHE
seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
#endif
+#ifdef __ARCH_SYNC_CORE_ICACHE
+ seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
+#endif
#ifdef CONFIG_BFIN_ICACHE_LOCK
switch ((cpudata->imemctl >> 3) & WAYALL_L) {
case WAY0_L:
diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c
new file mode 100644
index 00000000000..30301e1eace
--- /dev/null
+++ b/arch/blackfin/kernel/stacktrace.c
@@ -0,0 +1,53 @@
+/*
+ * Blackfin stacktrace code (mostly copied from avr32)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+
+register unsigned long current_frame_pointer asm("FP");
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long rets;
+};
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ unsigned long low, high;
+ unsigned long fp;
+ struct stackframe *frame;
+ int skip = trace->skip;
+
+ low = (unsigned long)task_stack_page(current);
+ high = low + THREAD_SIZE;
+ fp = current_frame_pointer;
+
+ while (fp >= low && fp <= (high - sizeof(*frame))) {
+ frame = (struct stackframe *)fp;
+
+ if (skip) {
+ skip--;
+ } else {
+ trace->entries[trace->nr_entries++] = frame->rets;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+
+ /*
+ * The next frame must be at a higher address than the
+ * current frame.
+ */
+ low = fp + sizeof(*frame);
+ fp = frame->fp;
+ }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
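A minimal sketch of how a kernel-side caller would feed a buffer to the new save_stack_trace() (kernel context, not a standalone program; the buffer size and skip count are arbitrary choices for the example):

/* Sketch: dump the current kernel stack via the new Blackfin
 * save_stack_trace().  demo_dump_current_stack() is a made-up helper. */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void demo_dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 1,	/* drop demo_dump_current_stack() itself */
	};
	unsigned int i;

	save_stack_trace(&trace);

	for (i = 0; i < trace.nr_entries; i++)
		printk(KERN_INFO " [<%08lx>]\n", trace.entries[i]);
}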
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index aa76dfb0226..d279552fe9b 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -27,6 +27,7 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/bug.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -238,6 +239,11 @@ asmlinkage void double_fault_c(struct pt_regs *fp)
}
+static int kernel_mode_regs(struct pt_regs *regs)
+{
+ return regs->ipend & 0xffc0;
+}
+
asmlinkage void trap_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
@@ -246,6 +252,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
unsigned int cpu = smp_processor_id();
#endif
+ const char *strerror = NULL;
int sig = 0;
siginfo_t info;
unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
@@ -259,27 +266,10 @@ asmlinkage void trap_c(struct pt_regs *fp)
* double faults if the stack has become corrupt
*/
- /* If the fault was caused by a kernel thread, or interrupt handler
- * we will kernel panic, so the system reboots.
- * If KGDB is enabled, don't set this for kernel breakpoints
- */
-
- /* TODO: check to see if we are in some sort of deferred HWERR
- * that we should be able to recover from, not kernel panic
- */
- if ((bfin_read_IPEND() & 0xFFC0) && (trapnr != VEC_STEP)
-#ifdef CONFIG_KGDB
- && (trapnr != VEC_EXCPT02)
+#ifndef CONFIG_KGDB
+ /* IPEND is skipped if KGDB isn't enabled (see entry code) */
+ fp->ipend = bfin_read_IPEND();
#endif
- ){
- console_verbose();
- oops_in_progress = 1;
- } else if (current) {
- if (current->mm == NULL) {
- console_verbose();
- oops_in_progress = 1;
- }
- }
/* trap_c() will be called for exceptions. During exceptions
* processing, the pc value should be set with retx value.
@@ -307,15 +297,15 @@ asmlinkage void trap_c(struct pt_regs *fp)
sig = SIGTRAP;
CHK_DEBUGGER_TRAP_MAYBE();
/* Check if this is a breakpoint in kernel space */
- if (fp->ipend & 0xffc0)
- return;
+ if (kernel_mode_regs(fp))
+ goto traps_done;
else
break;
/* 0x03 - User Defined, userspace stack overflow */
case VEC_EXCPT03:
info.si_code = SEGV_STACKFLOW;
sig = SIGSEGV;
- verbose_printk(KERN_NOTICE EXC_0x03(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x02 - KGDB initial connection and break signal trap */
@@ -324,7 +314,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
info.si_code = TRAP_ILLTRAP;
sig = SIGTRAP;
CHK_DEBUGGER_TRAP();
- return;
+ goto traps_done;
#endif
/* 0x04 - User Defined */
/* 0x05 - User Defined */
@@ -344,7 +334,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
case VEC_EXCPT04 ... VEC_EXCPT15:
info.si_code = ILL_ILLPARAOP;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x04(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x10 HW Single step, handled here */
@@ -353,15 +343,15 @@ asmlinkage void trap_c(struct pt_regs *fp)
sig = SIGTRAP;
CHK_DEBUGGER_TRAP_MAYBE();
/* Check if this is a single step in kernel space */
- if (fp->ipend & 0xffc0)
- return;
+ if (kernel_mode_regs(fp))
+ goto traps_done;
else
break;
/* 0x11 - Trace Buffer Full, handled here */
case VEC_OVFLOW:
info.si_code = TRAP_TRACEFLOW;
sig = SIGTRAP;
- verbose_printk(KERN_NOTICE EXC_0x11(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x12 - Reserved, Caught by default */
@@ -381,37 +371,54 @@ asmlinkage void trap_c(struct pt_regs *fp)
/* 0x20 - Reserved, Caught by default */
/* 0x21 - Undefined Instruction, handled here */
case VEC_UNDEF_I:
+#ifdef CONFIG_BUG
+ if (kernel_mode_regs(fp)) {
+ switch (report_bug(fp->pc, fp)) {
+ case BUG_TRAP_TYPE_NONE:
+ break;
+ case BUG_TRAP_TYPE_WARN:
+ dump_bfin_trace_buffer();
+ fp->pc += 2;
+ goto traps_done;
+ case BUG_TRAP_TYPE_BUG:
+ /* call to panic() will dump trace, and it is
+ * off at this point, so it won't be clobbered
+ */
+ panic("BUG()");
+ }
+ }
+#endif
info.si_code = ILL_ILLOPC;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x21(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x22 - Illegal Instruction Combination, handled here */
case VEC_ILGAL_I:
info.si_code = ILL_ILLPARAOP;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x22(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x23 - Data CPLB protection violation, handled here */
case VEC_CPLB_VL:
info.si_code = ILL_CPLB_VI;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x23(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x24 - Data access misaligned, handled here */
case VEC_MISALI_D:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x24(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x25 - Unrecoverable Event, handled here */
case VEC_UNCOV:
info.si_code = ILL_ILLEXCPT;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x25(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
@@ -419,7 +426,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
case VEC_CPLB_M:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x26(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
break;
/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
case VEC_CPLB_MHIT:
@@ -427,10 +434,10 @@ asmlinkage void trap_c(struct pt_regs *fp)
sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
- verbose_printk(KERN_NOTICE "NULL pointer access\n");
+ strerror = KERN_NOTICE "NULL pointer access\n";
else
#endif
- verbose_printk(KERN_NOTICE EXC_0x27(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x28 - Emulation Watchpoint, handled here */
@@ -440,8 +447,8 @@ asmlinkage void trap_c(struct pt_regs *fp)
pr_debug(EXC_0x28(KERN_DEBUG));
CHK_DEBUGGER_TRAP_MAYBE();
/* Check if this is a watchpoint in kernel space */
- if (fp->ipend & 0xffc0)
- return;
+ if (kernel_mode_regs(fp))
+ goto traps_done;
else
break;
#ifdef CONFIG_BF535
@@ -449,7 +456,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */
info.si_code = BUS_OPFETCH;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE "BF535: VEC_ISTRU_VL\n");
+ strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
CHK_DEBUGGER_TRAP_MAYBE();
break;
#else
@@ -459,21 +466,21 @@ asmlinkage void trap_c(struct pt_regs *fp)
case VEC_MISALI_I:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x2A(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x2B - Instruction CPLB protection violation, handled here */
case VEC_CPLB_I_VL:
info.si_code = ILL_CPLB_VI;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x2B(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
case VEC_CPLB_I_M:
info.si_code = ILL_CPLB_MISS;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x2C(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
break;
/* 0x2D - Instruction CPLB Multiple Hits, handled here */
case VEC_CPLB_I_MHIT:
@@ -481,17 +488,17 @@ asmlinkage void trap_c(struct pt_regs *fp)
sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
- verbose_printk(KERN_NOTICE "Jump to NULL address\n");
+ strerror = KERN_NOTICE "Jump to NULL address\n";
else
#endif
- verbose_printk(KERN_NOTICE EXC_0x2D(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x2E - Illegal use of Supervisor Resource, handled here */
case VEC_ILL_RES:
info.si_code = ILL_PRVOPC;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x2E(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x2F - Reserved, Caught by default */
@@ -519,17 +526,17 @@ asmlinkage void trap_c(struct pt_regs *fp)
case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
info.si_code = BUS_ADRALN;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE HWC_x2(KERN_NOTICE));
+ strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
break;
/* External Memory Addressing Error */
case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
info.si_code = BUS_ADRERR;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE HWC_x3(KERN_NOTICE));
+ strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
break;
/* Performance Monitor Overflow */
case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
- verbose_printk(KERN_NOTICE HWC_x12(KERN_NOTICE));
+ strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
break;
/* RAISE 5 instruction */
case (SEQSTAT_HWERRCAUSE_RAISE_5):
@@ -546,7 +553,6 @@ asmlinkage void trap_c(struct pt_regs *fp)
* if we get here we hit a reserved one, so panic
*/
default:
- oops_in_progress = 1;
info.si_code = ILL_ILLPARAOP;
sig = SIGILL;
verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
@@ -557,6 +563,16 @@ asmlinkage void trap_c(struct pt_regs *fp)
BUG_ON(sig == 0);
+ /* If the fault was caused by a kernel thread or an interrupt handler,
+ * we will panic the kernel, so the system reboots.
+ */
+ if (kernel_mode_regs(fp) || (current && !current->mm)) {
+ console_verbose();
+ oops_in_progress = 1;
+ if (strerror)
+ verbose_printk(strerror);
+ }
+
if (sig != SIGTRAP) {
dump_bfin_process(fp);
dump_bfin_mem(fp);
@@ -606,8 +622,8 @@ asmlinkage void trap_c(struct pt_regs *fp)
if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8))
fp->pc = SAFE_USER_INSTRUCTION;
+ traps_done:
trace_buffer_restore(j);
- return;
}
/* Typical exception handling routines */
@@ -792,6 +808,18 @@ void dump_bfin_trace_buffer(void)
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
+#ifdef CONFIG_BUG
+int is_valid_bugaddr(unsigned long addr)
+{
+ unsigned short opcode;
+
+ if (!get_instruction(&opcode, (unsigned short *)addr))
+ return 0;
+
+ return opcode == BFIN_BUG_OPCODE;
+}
+#endif
+
/*
* Checks to see if the address pointed to is either a
* 16-bit CALL instruction, or a 32-bit CALL instruction
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 8b67167cb4f..6ac307ca0d8 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -54,6 +54,7 @@ SECTIONS
SCHED_TEXT
#endif
LOCK_TEXT
+ IRQENTRY_TEXT
KPROBES_TEXT
*(.text.*)
*(.fixup)
@@ -166,6 +167,20 @@ SECTIONS
}
PERCPU(4)
SECURITY_INIT
+
+ /* we have to discard exit text and such at runtime, not link time, to
+ * handle embedded cross-section references (alt instructions, bug
+ * table, eh_frame, etc...)
+ */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+
.init.ramfs :
{
. = ALIGN(4);
@@ -264,8 +279,6 @@ SECTIONS
/DISCARD/ :
{
- EXIT_TEXT
- EXIT_DATA
*(.exitcall.exit)
}
}
diff --git a/arch/blackfin/lib/checksum.c b/arch/blackfin/lib/checksum.c
index 762a7f02970..cd605e7d851 100644
--- a/arch/blackfin/lib/checksum.c
+++ b/arch/blackfin/lib/checksum.c
@@ -116,6 +116,7 @@ __sum16 ip_compute_csum(const void *buff, int len)
{
return (__force __sum16)~do_csum(buff, len);
}
+EXPORT_SYMBOL(ip_compute_csum);
/*
* copy from fs while checksumming, otherwise like csum_partial
@@ -130,6 +131,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
memcpy(dst, (__force void *)src, len);
return csum_partial(dst, len, sum);
}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
/*
* copy from ds while checksumming, otherwise like csum_partial
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 62bba09bcce..1382f038235 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -246,7 +246,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
+ .chip_select = 2, /* On BF518F-EZBRD it's SPI0_SSEL2 */
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
@@ -369,6 +369,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -399,6 +404,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 6d6f9effa0b..1eaf27ff722 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -664,6 +664,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 1435c5d38cd..9f9c0005dcf 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -467,6 +467,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 147edd1eb1a..3e5b7db6b06 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -723,6 +723,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 895f213ea45..38cf8ffd6d7 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -266,6 +266,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 0765872a8ad..9ecdc361fa6 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -162,6 +162,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index a727e538fa2..1443e92d8b6 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -160,6 +160,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 842f1c9c239..89a5ec4ca04 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -196,6 +196,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index e19c565ade1..a68ade8a3ca 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -299,6 +299,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c
index 4fee1967312..2a87d1cfcd0 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -182,8 +182,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
- }
+ },
};
/* SPI controller data */
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 3c159819e55..399f81da7b9 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -184,6 +184,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 26707ce39f2..838240f151f 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -398,8 +398,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
- }
+ },
};
/* SPI controller data */
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index dfb5036f8a6..ff7228caa7d 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -1345,7 +1345,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
{
I2C_BOARD_INFO("pmic-adp5520", 0x32),
- .irq = IRQ_PF7,
+ .irq = IRQ_PG0,
.platform_data = (void *)&adp5520_pdev_data,
},
#endif
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 28057459120..e523e6e610d 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -182,6 +182,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index e37cb937888..57695b4c3c0 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -352,6 +352,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
}
};
@@ -366,6 +371,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index f53ad682530..f5a3c30a41b 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -612,6 +612,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
}
};
@@ -626,6 +631,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index add5a17452c..805a57b5e65 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -396,6 +396,8 @@ static struct platform_device bfin_sir3_device = {
#endif
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#include <linux/smsc911x.h>
+
static struct resource smsc911x_resources[] = {
{
.name = "smsc911x-memory",
@@ -409,11 +411,22 @@ static struct resource smsc911x_resources[] = {
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
+
+static struct smsc911x_platform_config smsc911x_config = {
+ .flags = SMSC911X_USE_32BIT,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = 0,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc911x_config,
+ },
};
#endif
@@ -741,6 +754,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
}
};
@@ -755,6 +773,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 0dd9685e5d5..0c9d72c5f5b 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -177,8 +177,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
- }
+ },
};
/* SPI controller data */
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 0e2178a1aec..b5ef7ff7b7b 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -304,6 +304,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index e6ab1f81512..b59ce3cb380 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -16,9 +16,21 @@
void blackfin_invalidate_entire_dcache(void)
{
u32 dmem = bfin_read_DMEM_CONTROL();
- SSYNC();
bfin_write_DMEM_CONTROL(dmem & ~0xc);
SSYNC();
bfin_write_DMEM_CONTROL(dmem);
SSYNC();
}
+
+/* Invalidate the Entire Instruction cache by
+ * clearing IMC bit
+ */
+void blackfin_invalidate_entire_icache(void)
+{
+ u32 imem = bfin_read_IMEM_CONTROL();
+ bfin_write_IMEM_CONTROL(imem & ~0x4);
+ SSYNC();
+ bfin_write_IMEM_CONTROL(imem);
+ SSYNC();
+}
+
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index da0558ad1b1..31fa313e81c 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -42,6 +42,7 @@
#include <asm/thread_info.h> /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>
+#include <asm/traps.h>
#include <asm/context.S>
@@ -84,13 +85,15 @@ ENTRY(_ex_workaround_261)
if !cc jump _bfin_return_from_exception;
/* fall through */
R7 = P4;
- R6 = 0x26; /* Data CPLB Miss */
+ R6 = VEC_CPLB_M; /* Data CPLB Miss */
cc = R6 == R7;
if cc jump _ex_dcplb_miss (BP);
- R6 = 0x23; /* Data CPLB Miss */
+#ifdef CONFIG_MPU
+ R6 = VEC_CPLB_VL; /* Data CPLB Violation */
cc = R6 == R7;
if cc jump _ex_dcplb_viol (BP);
- /* Handle 0x23 Data CPLB Protection Violation
+#endif
+ /* Handle Data CPLB Protection Violation
* and Data CPLB Multiple Hits - Linux Trap Zero
*/
jump _ex_trap_c;
@@ -270,7 +273,7 @@ ENTRY(_bfin_return_from_exception)
r6.l = lo(SEQSTAT_EXCAUSE);
r6.h = hi(SEQSTAT_EXCAUSE);
r7 = r7 & r6;
- r6 = 0x25;
+ r6 = VEC_UNCOV;
CC = R7 == R6;
if CC JUMP _double_fault;
#endif
@@ -1605,6 +1608,7 @@ ENTRY(_sys_call_table)
.long _sys_inotify_init1 /* 365 */
.long _sys_preadv
.long _sys_pwritev
+ .long _sys_rt_tgsigqueueinfo
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 3b8ebaee77f..61840059dfa 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -144,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
- struct ipi_message *msg, *mg;
+ struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();
@@ -154,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
msg_queue->count++;
spin_lock(&msg_queue->lock);
- list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
+ while (!list_empty(&msg_queue->head)) {
+ msg = list_entry(msg_queue->head.next, typeof(*msg), list);
list_del(&msg->list);
switch (msg->type) {
case BFIN_IPI_RESCHEDULE:
@@ -221,7 +222,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
@@ -261,7 +262,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
@@ -292,7 +293,7 @@ void smp_send_reschedule(int cpu)
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
@@ -320,7 +321,7 @@ void smp_send_stop(void)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
@@ -468,6 +469,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
+#ifdef __ARCH_SYNC_CORE_ICACHE
+void resync_core_icache(void)
+{
+ unsigned int cpu = get_cpu();
+ blackfin_invalidate_entire_icache();
+ ++per_cpu(cpu_data, cpu).icache_invld_count;
+ put_cpu();
+}
+EXPORT_SYMBOL(resync_core_icache);
+#endif
+
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
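The switch from list_add() to list_add_tail() in the IPI paths above makes the per-cpu message queue strictly FIFO: list_add() pushes at the head while list_add_tail() appends at the tail, so with the handler draining from the head the messages are now processed in the order they were queued. A tiny stand-alone sketch of that enqueue/drain pairing, with invented names and mirroring the reworked ipi_handler() loop:
#include <linux/list.h>
#include <linux/slab.h>
struct demo_msg {
	unsigned long type;
	struct list_head list;
};
static LIST_HEAD(demo_queue);
static void demo_enqueue(struct demo_msg *msg)
{
	list_add_tail(&msg->list, &demo_queue);	/* append: preserves queue order */
}
static void demo_drain(void)
{
	struct demo_msg *msg;
	while (!list_empty(&demo_queue)) {
		msg = list_entry(demo_queue.next, struct demo_msg, list);
		list_del(&msg->list);
		/* handle msg->type, then release it */
		kfree(msg);
	}
}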
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 783da855a2e..d6d35b2e5fe 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -963,7 +963,7 @@ CONFIG_EEPROM_LEGACY=y
CONFIG_SENSORS_PCF8574=y
# CONFIG_PCF8575 is not set
CONFIG_SENSORS_PCF8591=y
-CONFIG_SENSORS_MAX6875=y
+CONFIG_EEPROM_MAX6875=y
# CONFIG_SENSORS_TSL2550 is not set
CONFIG_I2C_DEBUG_CORE=y
CONFIG_I2C_DEBUG_ALGO=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 8426d3b9501..fadb351d249 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -1849,7 +1849,7 @@ CONFIG_EEPROM_LEGACY=m
CONFIG_SENSORS_PCF8574=m
CONFIG_SENSORS_PCA9539=m
CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_MAX6875=m
+CONFIG_EEPROM_MAX6875=m
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
diff --git a/arch/mips/sni/eisa.c b/arch/mips/sni/eisa.c
index 7396cd71990..6827feb4de9 100644
--- a/arch/mips/sni/eisa.c
+++ b/arch/mips/sni/eisa.c
@@ -38,7 +38,7 @@ int __init sni_eisa_root_init(void)
if (!r)
return r;
- eisa_root_dev.dev.driver_data = &eisa_bus_root;
+ dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
if (eisa_root_register(&eisa_bus_root)) {
/* A real bridge may have been registered before
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 93a61898b25..9fb344d5a86 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -93,10 +93,6 @@ config GENERIC_HWEIGHT
bool
default y
-config GENERIC_CALIBRATE_DELAY
- bool
- default y
-
config GENERIC_FIND_NEXT_BIT
bool
default y
@@ -129,6 +125,7 @@ config PPC
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
+ select GENERIC_ATOMIC64 if PPC32
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index 51b2387bdba..98312d169c8 100644
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -18,6 +18,9 @@
# $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc.
#
+# Bail with error code if anything goes wrong
+set -e
+
# User may have a custom install script
if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 7d044dfd923..12dc7c40961 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -1808,7 +1808,7 @@ CONFIG_PCF8575=m
CONFIG_SENSORS_PCA9539=m
CONFIG_SENSORS_PCF8591=m
# CONFIG_TPS65010 is not set
-CONFIG_SENSORS_MAX6875=m
+CONFIG_EEPROM_MAX6875=m
CONFIG_SENSORS_TSL2550=m
CONFIG_MCU_MPC8349EMITX=m
# CONFIG_I2C_DEBUG_CORE is not set
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b7d2d07b6f9..4012483b189 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -470,6 +470,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#else /* __powerpc64__ */
+#include <asm-generic/atomic64.h>
+
#endif /* __powerpc64__ */
#include <asm-generic/atomic-long.h>
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 53512374e1c..b7f8f4a87cc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -80,7 +80,7 @@ static inline void local_irq_disable(void)
__asm__ __volatile__("wrteei 0": : :"memory");
#else
unsigned long msr;
- __asm__ __volatile__("": : :"memory");
+
msr = mfmsr();
SET_MSR_EE(msr & ~MSR_EE);
#endif
@@ -92,7 +92,7 @@ static inline void local_irq_enable(void)
__asm__ __volatile__("wrteei 1": : :"memory");
#else
unsigned long msr;
- __asm__ __volatile__("": : :"memory");
+
msr = mfmsr();
SET_MSR_EE(msr | MSR_EE);
#endif
@@ -108,7 +108,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
#else
SET_MSR_EE(msr & ~MSR_EE);
#endif
- __asm__ __volatile__("": : :"memory");
}
#define local_save_flags(flags) ((flags) = mfmsr())
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7464c0daddd..7ead7c16fb7 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -35,6 +35,16 @@
#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+/* Cell page table entries */
+#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */
+#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */
+#define CBE_IOPTE_M 0x2000000000000000ul /* coherency required */
+#define CBE_IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
+#define CBE_IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
+#define CBE_IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
+#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */
+#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
+
/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index cdb6fd814de..7f065e178ec 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -53,6 +53,13 @@ enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void);
extern u64 ps3_os_area_get_rtc_diff(void);
extern void ps3_os_area_set_rtc_diff(u64 rtc_diff);
+struct ps3_os_area_flash_ops {
+ ssize_t (*read)(void *buf, size_t count, loff_t pos);
+ ssize_t (*write)(const void *buf, size_t count, loff_t pos);
+};
+
+extern void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops);
+
/* dma routines */
enum ps3_dma_page_size {
@@ -418,15 +425,15 @@ static inline struct ps3_system_bus_driver *
* @data: Data to set
*/
-static inline void ps3_system_bus_set_driver_data(
+static inline void ps3_system_bus_set_drvdata(
struct ps3_system_bus_device *dev, void *data)
{
- dev->core.driver_data = data;
+ dev_set_drvdata(&dev->core, data);
}
-static inline void *ps3_system_bus_get_driver_data(
+static inline void *ps3_system_bus_get_drvdata(
struct ps3_system_bus_device *dev)
{
- return dev->core.driver_data;
+ return dev_get_drvdata(&dev->core);
}
/* These two need global scope for get_dma_ops(). */
@@ -520,7 +527,4 @@ void ps3_sync_irq(int node);
u32 ps3_get_hw_thread_id(int cpu);
u64 ps3_get_spe_id(void *arg);
-/* mutex synchronizing GPU accesses and video mode changes */
-extern struct mutex ps3_gpu_mutex;
-
#endif
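ps3_os_area_flash_register() lets the flash storage driver hand its accessors to the os-area code, replacing the old sys_open()/sys_read() calls on /dev/ps3flash (see the os-area.c hunks further down). A minimal sketch of a provider, with invented names and the actual flash access elided:
#include <linux/types.h>
#include <asm/ps3.h>
static ssize_t my_flash_read(void *buf, size_t count, loff_t pos)
{
	/* read 'count' bytes from flash offset 'pos' into 'buf' */
	return count;
}
static ssize_t my_flash_write(const void *buf, size_t count, loff_t pos)
{
	/* write 'count' bytes from 'buf' to flash offset 'pos' */
	return count;
}
static const struct ps3_os_area_flash_ops my_flash_ops = {
	.read	= my_flash_read,
	.write	= my_flash_write,
};
static int my_flash_probe(void)
{
	ps3_os_area_flash_register(&my_flash_ops);
	/* the register function just stores the pointer, so passing NULL
	 * on removal would detach the provider again */
	return 0;
}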
diff --git a/arch/powerpc/include/asm/ps3gpu.h b/arch/powerpc/include/asm/ps3gpu.h
new file mode 100644
index 00000000000..b2b89591907
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3gpu.h
@@ -0,0 +1,86 @@
+/*
+ * PS3 GPU declarations.
+ *
+ * Copyright 2009 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_POWERPC_PS3GPU_H
+#define _ASM_POWERPC_PS3GPU_H
+
+#include <linux/mutex.h>
+
+#include <asm/lv1call.h>
+
+
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC 0x101
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP 0x102
+
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP 0x600
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC 0x602
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE 0x603
+
+#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION (1ULL << 32)
+
+#define L1GPU_DISPLAY_SYNC_HSYNC 1
+#define L1GPU_DISPLAY_SYNC_VSYNC 2
+
+
+/* mutex synchronizing GPU accesses and video mode changes */
+extern struct mutex ps3_gpu_mutex;
+
+
+static inline int lv1_gpu_display_sync(u64 context_handle, u64 head,
+ u64 ddr_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+ head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_display_flip(u64 context_handle, u64 head,
+ u64 ddr_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
+ head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_fb_setup(u64 context_handle, u64 xdr_lpar,
+ u64 xdr_size, u64 ioif_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
+ xdr_lpar, xdr_size, ioif_offset, 0);
+}
+
+static inline int lv1_gpu_fb_blit(u64 context_handle, u64 ddr_offset,
+ u64 ioif_offset, u64 sync_width, u64 pitch)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+ ddr_offset, ioif_offset, sync_width,
+ pitch);
+}
+
+static inline int lv1_gpu_fb_close(u64 context_handle)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE, 0,
+ 0, 0, 0);
+}
+
+#endif /* _ASM_POWERPC_PS3GPU_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fb359b0a693..a3c28e46947 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -745,11 +745,11 @@
asm volatile("mfmsr %0" : "=r" (rval)); rval;})
#ifdef CONFIG_PPC64
#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
- : : "r" (v))
+ : : "r" (v) : "memory")
#define mtmsrd(v) __mtmsrd((v), 0)
#define mtmsr(v) mtmsrd(v)
#else
-#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
+#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory")
#endif
#define mfspr(rn) ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index a0b92de51c7..370600ca276 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -325,3 +325,4 @@ SYSCALL(inotify_init1)
SYSCALL_SPU(perf_counter_open)
COMPAT_SYS_SPU(preadv)
COMPAT_SYS_SPU(pwritev)
+COMPAT_SYS(rt_tgsigqueueinfo)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4badac2d11d..cef080bfc60 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -344,10 +344,11 @@
#define __NR_perf_counter_open 319
#define __NR_preadv 320
#define __NR_pwritev 321
+#define __NR_rt_tgsigqueueinfo 322
#ifdef __KERNEL__
-#define __NR_syscalls 322
+#define __NR_syscalls 323
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a7def5f90ca..612b0c4dc26 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -125,6 +125,7 @@ PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)
+ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y)
$(obj)/built-in.o: prom_init_check
quiet_cmd_prom_init_check = CALL $<
@@ -133,5 +134,6 @@ quiet_cmd_prom_init_check = CALL $<
PHONY += prom_init_check
prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o
$(call cmd,prom_init_check)
+endif
clean-files := vmlinux.lds
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f46548e6604..1f6816003eb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -424,8 +424,8 @@ void __init setup_system(void)
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
if (PHYSICAL_START > 0)
- printk("physical_start = 0x%lx\n",
- PHYSICAL_START);
+ printk("physical_start = 0x%llx\n",
+ (unsigned long long)PHYSICAL_START);
printk("-----------------------------------------------------\n");
DBG(" <- setup_system()\n");
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bee1443da76..15391c2ab01 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -52,6 +52,7 @@
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -1143,6 +1144,15 @@ void div128_by_32(u64 dividend_high, u64 dividend_low,
}
+/* We don't need to calibrate delay, we use the CPU timebase for that */
+void calibrate_delay(void)
+{
+ /* Some generic code (such as spinlock debug) use loops_per_jiffy
+ * as the number of __delay(1) in a jiffy, so make it so
+ */
+ loops_per_jiffy = tb_ticks_per_jiffy;
+}
+
static int __init rtc_init(void)
{
struct platform_device *pdev;
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 0ce45c2b42f..c71498dbf21 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -329,7 +329,7 @@ static struct irq_host_ops msic_host_ops = {
static int axon_msi_shutdown(struct of_device *device)
{
- struct axon_msic *msic = device->dev.platform_data;
+ struct axon_msic *msic = dev_get_drvdata(&device->dev);
u32 tmp;
pr_debug("axon_msi: disabling %s\n",
@@ -416,7 +416,7 @@ static int axon_msi_probe(struct of_device *device,
msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
& MSIC_FIFO_SIZE_MASK;
- device->dev.platform_data = msic;
+ dev_set_drvdata(&device->dev, msic);
ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index bed4690de39..5b34fc211f3 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -100,16 +100,6 @@
#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */
#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */
-/* Page table entries */
-#define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
-#define IOPTE_PP_R 0x4000000000000000ul /* protection: read */
-#define IOPTE_M 0x2000000000000000ul /* coherency required */
-#define IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
-#define IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
-#define IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
-#define IOPTE_H 0x0000000000000800ul /* cache hint */
-#define IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
-
/* IOMMU sizing */
#define IO_SEGMENT_SHIFT 28
@@ -193,19 +183,21 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
*/
const unsigned long prot = 0xc48;
base_pte =
- ((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
- | IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
+ ((prot << (52 + 4 * direction)) &
+ (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
+ CBE_IOPTE_M | CBE_IOPTE_SO_RW |
+ (window->ioid & CBE_IOPTE_IOID_Mask);
#else
- base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
- (window->ioid & IOPTE_IOID_Mask);
+ base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
+ CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
- base_pte &= ~IOPTE_SO_RW;
+ base_pte &= ~CBE_IOPTE_SO_RW;
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
- io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+ io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
mb();
@@ -231,8 +223,9 @@ static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
#else
/* spider bridge does PCI reads after freeing - insert a mapping
* to a scratch page instead of an invalid entry */
- pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
- | (window->ioid & IOPTE_IOID_Mask);
+ pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
+ __pa(window->iommu->pad_page) |
+ (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
@@ -1001,7 +994,7 @@ static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
addr, ptab, segment, offset);
- ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+ ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}
static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
@@ -1016,14 +1009,14 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
- base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M
- | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
+ base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
+ (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);
if (iommu_fixed_is_weak)
pr_info("IOMMU: Using weak ordering for fixed mapping\n");
else {
pr_info("IOMMU: Using strong ordering for fixed mapping\n");
- base_pte |= IOPTE_SO_RW;
+ base_pte |= CBE_IOPTE_SO_RW;
}
for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index 4543c4bc3a5..c5a87a72057 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -204,7 +204,8 @@ static void __init dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
dt_prop(dt, name, &data, sizeof(u32));
}
-static void __init dt_prop_u64(struct iseries_flat_dt *dt, const char *name,
+static void __init __maybe_unused dt_prop_u64(struct iseries_flat_dt *dt,
+ const char *name,
u64 data)
{
dt_prop(dt, name, &data, sizeof(u64));
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 3689c2413d2..fef4d515051 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -267,7 +267,8 @@ static struct pending_event *new_pending_event(void)
return ev;
}
-static int signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
+static int __maybe_unused
+signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
{
struct pending_event *ev = new_pending_event();
int rc;
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 9a2b6d94861..846eb8b57fd 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -24,6 +24,7 @@
#include <linux/lmb.h>
#include <asm/firmware.h>
+#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
@@ -605,9 +606,8 @@ static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
r->ioid,
iopte_flag);
if (result) {
- printk(KERN_WARNING "%s:%d: lv1_map_device_dma_region "
- "failed: %s\n", __func__, __LINE__,
- ps3_result(result));
+ pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
goto fail_map;
}
DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
@@ -1001,7 +1001,8 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r)
if (len > r->len)
len = r->len;
result = dma_sb_map_area(r, virt_addr, len, &tmp,
- IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+ CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
+ CBE_IOPTE_M);
BUG_ON(result);
}
@@ -1014,7 +1015,8 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r)
else
len -= map.rm.size - r->offset;
result = dma_sb_map_area(r, virt_addr, len, &tmp,
- IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+ CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
+ CBE_IOPTE_M);
BUG_ON(result);
}
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index cf1cd0f8c18..d6487a9c801 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -226,6 +226,44 @@ static struct property property_av_multi_out = {
.value = &saved_params.av_multi_out,
};
+
+static DEFINE_MUTEX(os_area_flash_mutex);
+
+static const struct ps3_os_area_flash_ops *os_area_flash_ops;
+
+void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops)
+{
+ mutex_lock(&os_area_flash_mutex);
+ os_area_flash_ops = ops;
+ mutex_unlock(&os_area_flash_mutex);
+}
+EXPORT_SYMBOL_GPL(ps3_os_area_flash_register);
+
+static ssize_t os_area_flash_read(void *buf, size_t count, loff_t pos)
+{
+ ssize_t res = -ENODEV;
+
+ mutex_lock(&os_area_flash_mutex);
+ if (os_area_flash_ops)
+ res = os_area_flash_ops->read(buf, count, pos);
+ mutex_unlock(&os_area_flash_mutex);
+
+ return res;
+}
+
+static ssize_t os_area_flash_write(const void *buf, size_t count, loff_t pos)
+{
+ ssize_t res = -ENODEV;
+
+ mutex_lock(&os_area_flash_mutex);
+ if (os_area_flash_ops)
+ res = os_area_flash_ops->write(buf, count, pos);
+ mutex_unlock(&os_area_flash_mutex);
+
+ return res;
+}
+
+
/**
* os_area_set_property - Add or overwrite a saved_params value to the device tree.
*
@@ -352,12 +390,12 @@ static int db_verify(const struct os_area_db *db)
if (memcmp(db->magic_num, OS_AREA_DB_MAGIC_NUM,
sizeof(db->magic_num))) {
pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
- return -1;
+ return -EINVAL;
}
if (db->version != 1) {
pr_debug("%s:%d version failed\n", __func__, __LINE__);
- return -1;
+ return -EINVAL;
}
return 0;
@@ -578,59 +616,48 @@ static void os_area_db_init(struct os_area_db *db)
*
*/
-static void __maybe_unused update_flash_db(void)
+static int update_flash_db(void)
{
- int result;
- int file;
- off_t offset;
+ const unsigned int buf_len = 8 * OS_AREA_SEGMENT_SIZE;
+ struct os_area_header *header;
ssize_t count;
- static const unsigned int buf_len = 8 * OS_AREA_SEGMENT_SIZE;
- const struct os_area_header *header;
+ int error;
+ loff_t pos;
struct os_area_db* db;
/* Read in header and db from flash. */
- file = sys_open("/dev/ps3flash", O_RDWR, 0);
-
- if (file < 0) {
- pr_debug("%s:%d sys_open failed\n", __func__, __LINE__);
- goto fail_open;
- }
-
header = kmalloc(buf_len, GFP_KERNEL);
-
if (!header) {
- pr_debug("%s:%d kmalloc failed\n", __func__, __LINE__);
- goto fail_malloc;
+ pr_debug("%s: kmalloc failed\n", __func__);
+ return -ENOMEM;
}
- offset = sys_lseek(file, 0, SEEK_SET);
-
- if (offset != 0) {
- pr_debug("%s:%d sys_lseek failed\n", __func__, __LINE__);
- goto fail_header_seek;
+ count = os_area_flash_read(header, buf_len, 0);
+ if (count < 0) {
+ pr_debug("%s: os_area_flash_read failed %zd\n", __func__,
+ count);
+ error = count;
+ goto fail;
}
- count = sys_read(file, (char __user *)header, buf_len);
-
- result = count < OS_AREA_SEGMENT_SIZE || verify_header(header)
- || count < header->db_area_offset * OS_AREA_SEGMENT_SIZE;
-
- if (result) {
- pr_debug("%s:%d verify_header failed\n", __func__, __LINE__);
+ pos = header->db_area_offset * OS_AREA_SEGMENT_SIZE;
+ if (count < OS_AREA_SEGMENT_SIZE || verify_header(header) ||
+ count < pos) {
+ pr_debug("%s: verify_header failed\n", __func__);
dump_header(header);
- goto fail_header;
+ error = -EINVAL;
+ goto fail;
}
/* Now got a good db offset and some maybe good db data. */
- db = (void*)header + header->db_area_offset * OS_AREA_SEGMENT_SIZE;
+ db = (void *)header + pos;
- result = db_verify(db);
-
- if (result) {
- printk(KERN_NOTICE "%s:%d: Verify of flash database failed, "
- "formatting.\n", __func__, __LINE__);
+ error = db_verify(db);
+ if (error) {
+ pr_notice("%s: Verify of flash database failed, formatting.\n",
+ __func__);
dump_db(db);
os_area_db_init(db);
}
@@ -639,29 +666,16 @@ static void __maybe_unused update_flash_db(void)
db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
- offset = sys_lseek(file, header->db_area_offset * OS_AREA_SEGMENT_SIZE,
- SEEK_SET);
-
- if (offset != header->db_area_offset * OS_AREA_SEGMENT_SIZE) {
- pr_debug("%s:%d sys_lseek failed\n", __func__, __LINE__);
- goto fail_db_seek;
- }
-
- count = sys_write(file, (const char __user *)db,
- sizeof(struct os_area_db));
-
+ count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
if (count < sizeof(struct os_area_db)) {
- pr_debug("%s:%d sys_write failed\n", __func__, __LINE__);
+ pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
+ count);
+ error = count < 0 ? count : -EIO;
}
-fail_db_seek:
-fail_header:
-fail_header_seek:
+fail:
kfree(header);
-fail_malloc:
- sys_close(file);
-fail_open:
- return;
+ return error;
}
/**
@@ -674,11 +688,11 @@ fail_open:
static void os_area_queue_work_handler(struct work_struct *work)
{
struct device_node *node;
+ int error;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
node = of_find_node_by_path("/");
-
if (node) {
os_area_set_property(node, &property_rtc_diff);
of_node_put(node);
@@ -686,12 +700,10 @@ static void os_area_queue_work_handler(struct work_struct *work)
pr_debug("%s:%d of_find_node_by_path failed\n",
__func__, __LINE__);
-#if defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
- update_flash_db();
-#else
- printk(KERN_WARNING "%s:%d: No flash rom driver configured.\n",
- __func__, __LINE__);
-#endif
+ error = update_flash_db();
+ if (error)
+ pr_warning("%s: Could not update FLASH ROM\n", __func__);
+
pr_debug(" <- %s:%d\n", __func__, __LINE__);
}
@@ -808,7 +820,7 @@ u64 ps3_os_area_get_rtc_diff(void)
{
return saved_params.rtc_diff;
}
-EXPORT_SYMBOL(ps3_os_area_get_rtc_diff);
+EXPORT_SYMBOL_GPL(ps3_os_area_get_rtc_diff);
/**
* ps3_os_area_set_rtc_diff - Set the rtc diff value.
@@ -824,7 +836,7 @@ void ps3_os_area_set_rtc_diff(u64 rtc_diff)
os_area_queue_work();
}
}
-EXPORT_SYMBOL(ps3_os_area_set_rtc_diff);
+EXPORT_SYMBOL_GPL(ps3_os_area_set_rtc_diff);
/**
* ps3_os_area_get_av_multi_out - Returns the default video mode.
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 136aa0637d9..9a196a88eda 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -232,14 +232,4 @@ int ps3_repository_read_spu_resource_id(unsigned int res_index,
int ps3_repository_read_vuart_av_port(unsigned int *port);
int ps3_repository_read_vuart_sysmgr_port(unsigned int *port);
-/* Page table entries */
-#define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
-#define IOPTE_PP_R 0x4000000000000000ul /* protection: read */
-#define IOPTE_M 0x2000000000000000ul /* coherency required */
-#define IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
-#define IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
-#define IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
-#define IOPTE_H 0x0000000000000800ul /* cache hint */
-#define IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
-
#endif
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 1a7b5ae0c83..149bea2ce58 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -32,6 +32,7 @@
#include <asm/udbg.h>
#include <asm/prom.h>
#include <asm/lv1call.h>
+#include <asm/ps3gpu.h>
#include "platform.h"
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 9a73d023863..9fead0faf38 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -27,6 +27,7 @@
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/firmware.h>
+#include <asm/iommu.h>
#include "platform.h"
@@ -531,7 +532,8 @@ static void * ps3_alloc_coherent(struct device *_dev, size_t size,
}
result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle,
- IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+ CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
+ CBE_IOPTE_SO_RW | CBE_IOPTE_M);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -575,7 +577,8 @@ static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
&bus_addr,
- IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW | IOPTE_M);
+ CBE_IOPTE_PP_R | CBE_IOPTE_PP_W |
+ CBE_IOPTE_SO_RW | CBE_IOPTE_M);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -596,16 +599,16 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
u64 iopte_flag;
void *ptr = page_address(page) + offset;
- iopte_flag = IOPTE_M;
+ iopte_flag = CBE_IOPTE_M;
switch (direction) {
case DMA_BIDIRECTIONAL:
- iopte_flag |= IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW;
+ iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
case DMA_TO_DEVICE:
- iopte_flag |= IOPTE_PP_R | IOPTE_SO_R;
+ iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_SO_R;
break;
case DMA_FROM_DEVICE:
- iopte_flag |= IOPTE_PP_W | IOPTE_SO_RW;
+ iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
default:
/* does not happen */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 99dc3ded6b4..a14dba0e4d6 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -348,6 +348,9 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
config ARCH_ENABLE_MEMORY_HOTREMOVE
def_bool y
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y if 64BIT
+
source "mm/Kconfig"
comment "I/O subsystem configuration"
@@ -592,6 +595,12 @@ config SECCOMP
endmenu
+menu "Power Management"
+
+source "kernel/power/Kconfig"
+
+endmenu
+
source "net/Kconfig"
config PCMCIA
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 578c61f15a4..0ff387cebf8 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -88,7 +88,9 @@ LDFLAGS_vmlinux := -e start
head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
- arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
+ arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \
+ arch/s390/power/
+
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 1dfc7100c7e..264528e4f58 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -5,7 +5,7 @@
* Exports appldata_register_ops() and appldata_unregister_ops() for the
* data gathering modules.
*
- * Copyright IBM Corp. 2003, 2008
+ * Copyright IBM Corp. 2003, 2009
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
@@ -26,6 +26,8 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
#include <asm/appldata.h>
#include <asm/timer.h>
#include <asm/uaccess.h>
@@ -41,6 +43,9 @@
#define TOD_MICRO 0x01000 /* nr. of TOD clock units
for 1 microsecond */
+
+static struct platform_device *appldata_pdev;
+
/*
* /proc entries (sysctl)
*/
@@ -86,6 +91,7 @@ static atomic_t appldata_expire_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;
+static int appldata_timer_suspended = 0;
/*
* Work queue
@@ -475,6 +481,93 @@ void appldata_unregister_ops(struct appldata_ops *ops)
/********************** module-ops management <END> **************************/
+/**************************** suspend / resume *******************************/
+static int appldata_freeze(struct device *dev)
+{
+ struct appldata_ops *ops;
+ int rc;
+ struct list_head *lh;
+
+ get_online_cpus();
+ spin_lock(&appldata_timer_lock);
+ if (appldata_timer_active) {
+ __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
+ appldata_timer_suspended = 1;
+ }
+ spin_unlock(&appldata_timer_lock);
+ put_online_cpus();
+
+ mutex_lock(&appldata_ops_mutex);
+ list_for_each(lh, &appldata_ops_list) {
+ ops = list_entry(lh, struct appldata_ops, list);
+ if (ops->active == 1) {
+ rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
+ if (rc != 0)
+ pr_err("Stopping the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
+ }
+ }
+ mutex_unlock(&appldata_ops_mutex);
+ return 0;
+}
+
+static int appldata_restore(struct device *dev)
+{
+ struct appldata_ops *ops;
+ int rc;
+ struct list_head *lh;
+
+ get_online_cpus();
+ spin_lock(&appldata_timer_lock);
+ if (appldata_timer_suspended) {
+ __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+ appldata_timer_suspended = 0;
+ }
+ spin_unlock(&appldata_timer_lock);
+ put_online_cpus();
+
+ mutex_lock(&appldata_ops_mutex);
+ list_for_each(lh, &appldata_ops_list) {
+ ops = list_entry(lh, struct appldata_ops, list);
+ if (ops->active == 1) {
+ ops->callback(ops->data); // init record
+ rc = appldata_diag(ops->record_nr,
+ APPLDATA_START_INTERVAL_REC,
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
+ if (rc != 0) {
+ pr_err("Starting the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
+ }
+ }
+ }
+ mutex_unlock(&appldata_ops_mutex);
+ return 0;
+}
+
+static int appldata_thaw(struct device *dev)
+{
+ return appldata_restore(dev);
+}
+
+static struct dev_pm_ops appldata_pm_ops = {
+ .freeze = appldata_freeze,
+ .thaw = appldata_thaw,
+ .restore = appldata_restore,
+};
+
+static struct platform_driver appldata_pdrv = {
+ .driver = {
+ .name = "appldata",
+ .owner = THIS_MODULE,
+ .pm = &appldata_pm_ops,
+ },
+};
+/************************* suspend / resume <END> ****************************/
+
+
/******************************* init / exit *********************************/
static void __cpuinit appldata_online_cpu(int cpu)
@@ -531,11 +624,23 @@ static struct notifier_block __cpuinitdata appldata_nb = {
*/
static int __init appldata_init(void)
{
- int i;
+ int i, rc;
+
+ rc = platform_driver_register(&appldata_pdrv);
+ if (rc)
+ return rc;
+ appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
+ 0);
+ if (IS_ERR(appldata_pdev)) {
+ rc = PTR_ERR(appldata_pdev);
+ goto out_driver;
+ }
appldata_wq = create_singlethread_workqueue("appldata");
- if (!appldata_wq)
- return -ENOMEM;
+ if (!appldata_wq) {
+ rc = -ENOMEM;
+ goto out_device;
+ }
get_online_cpus();
for_each_online_cpu(i)
@@ -547,6 +652,12 @@ static int __init appldata_init(void)
appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
return 0;
+
+out_device:
+ platform_device_unregister(appldata_pdev);
+out_driver:
+ platform_driver_unregister(&appldata_pdrv);
+ return rc;
}
__initcall(appldata_init);
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index ba007d8df94..2a541955117 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -1,11 +1,9 @@
/*
- * include/asm-s390/ccwdev.h
- * include/asm-s390x/ccwdev.h
+ * Copyright IBM Corp. 2002, 2009
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Arnd Bergmann <arndb@de.ibm.com>
+ * Author(s): Arnd Bergmann <arndb@de.ibm.com>
*
- * Interface for CCW device drivers
+ * Interface for CCW device drivers
*/
#ifndef _S390_CCWDEV_H_
#define _S390_CCWDEV_H_
@@ -104,6 +102,11 @@ struct ccw_device {
* @set_offline: called when setting device offline
* @notify: notify driver of device state changes
* @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
* @driver: embedded device driver structure
* @name: device driver name
*/
@@ -116,6 +119,11 @@ struct ccw_driver {
int (*set_offline) (struct ccw_device *);
int (*notify) (struct ccw_device *, int);
void (*shutdown) (struct ccw_device *);
+ int (*prepare) (struct ccw_device *);
+ void (*complete) (struct ccw_device *);
+ int (*freeze)(struct ccw_device *);
+ int (*thaw) (struct ccw_device *);
+ int (*restore)(struct ccw_device *);
struct device_driver driver;
char *name;
};
@@ -184,6 +192,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_probe_console(void);
+extern int ccw_device_force_console(void);
// FIXME: these have to go
extern int _ccw_device_get_subchannel_number(struct ccw_device *);
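The five new ccw_driver callbacks mirror the hibernation stages used elsewhere in this series (compare the appldata freeze/thaw/restore handlers above). A driver opts in simply by filling the extra fields; a minimal sketch with invented driver and function names:
#include <asm/ccwdev.h>
static int mydrv_freeze(struct ccw_device *cdev)
{
	/* quiesce the device before the hibernation image is written */
	return 0;
}
static int mydrv_restore(struct ccw_device *cdev)
{
	/* re-initialize the device after resuming from the image */
	return 0;
}
static struct ccw_driver mydrv_driver = {
	.name	 = "mydrv",
	.freeze	 = mydrv_freeze,
	.thaw	 = mydrv_restore,	/* undo freeze: reuse the restore path */
	.restore = mydrv_restore,
};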
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index a27f68985a7..c79c1e787b8 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -38,6 +38,11 @@ struct ccwgroup_device {
* @set_online: function called when device is set online
* @set_offline: function called when device is set offline
* @shutdown: function called when device is shut down
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
* @driver: embedded driver structure
*/
struct ccwgroup_driver {
@@ -51,6 +56,11 @@ struct ccwgroup_driver {
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
void (*shutdown)(struct ccwgroup_device *);
+ int (*prepare) (struct ccwgroup_device *);
+ void (*complete) (struct ccwgroup_device *);
+ int (*freeze)(struct ccwgroup_device *);
+ int (*thaw) (struct ccwgroup_device *);
+ int (*restore)(struct ccwgroup_device *);
struct device_driver driver;
};
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
new file mode 100644
index 00000000000..dc75c616eaf
--- /dev/null
+++ b/arch/s390/include/asm/suspend.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_S390_SUSPEND_H
+#define __ASM_S390_SUSPEND_H
+
+static inline int arch_prepare_suspend(void)
+{
+ return 0;
+}
+
+#endif
+
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 3a8b26eb1f2..4fb83c1cdb7 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -1,11 +1,7 @@
/*
- * include/asm-s390/system.h
+ * Copyright IBM Corp. 1999, 2009
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *
- * Derived from "include/asm-i386/system.h"
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __ASM_SYSTEM_H
@@ -469,6 +465,20 @@ extern psw_t sysc_restore_trace_psw;
extern psw_t io_restore_trace_psw;
#endif
+static inline int tprot(unsigned long addr)
+{
+ int rc = -EFAULT;
+
+ asm volatile(
+ " tprot 0(%1),0\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc) : "a" (addr) : "cc");
+ return rc;
+}
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index fb263736826..f9b144049dc 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -1,7 +1,7 @@
/*
* arch/s390/kernel/early.c
*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2009
* Author(s): Hongjie Yang <hongjie@us.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void)
machine_flags |= MACHINE_FLAG_VM;
}
-static __init void early_pgm_check_handler(void)
+static void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
@@ -222,7 +222,7 @@ static __init void early_pgm_check_handler(void)
S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
-static noinline __init void setup_lowcore_early(void)
+void setup_lowcore_early(void)
{
psw_t psw;
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 9872999c66d..559af0d0787 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -1,6 +1,7 @@
/*
- * Copyright IBM Corp. 2008
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/kernel.h>
@@ -9,20 +10,6 @@
#include <asm/sclp.h>
#include <asm/setup.h>
-static inline int tprot(unsigned long addr)
-{
- int rc = -EFAULT;
-
- asm volatile(
- " tprot 0(%1),0\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (rc) : "a" (addr) : "cc");
- return rc;
-}
-
#define ADDR2G (1ULL << 31)
static void find_memory_chunks(struct mem_chunk chunk[])
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index cc8c484984e..fd8e3111a4e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,7 +1,7 @@
/*
* arch/s390/kernel/smp.c
*
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2009
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
@@ -1031,6 +1031,42 @@ out:
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
dispatching_store);
+/*
+ * If the resume kernel runs on another cpu than the suspended kernel,
+ * we have to switch the cpu IDs in the logical map.
+ */
+void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id,
+ struct _lowcore *suspend_lowcore)
+{
+ int cpu, suspend_cpu_id, resume_cpu_id;
+ u32 suspend_phys_cpu_id;
+
+ suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr];
+ suspend_cpu_id = suspend_lowcore->cpu_nr;
+
+ for_each_present_cpu(cpu) {
+ if (__cpu_logical_map[cpu] == resume_phys_cpu_id) {
+ resume_cpu_id = cpu;
+ goto found;
+ }
+ }
+ panic("Could not find resume cpu in logical map.\n");
+
+found:
+ printk("Resume cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id);
+ printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id);
+
+ __cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id;
+ __cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id;
+
+ lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id;
+}
+
+u32 smp_get_phys_cpu_id(void)
+{
+ return __cpu_logical_map[smp_processor_id()];
+}
+
static int __init topology_init(void)
{
int cpu;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4ca8e826bf3..56566720798 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -313,3 +313,22 @@ int s390_enable_sie(void)
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+#ifdef CONFIG_HIBERNATION
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr;
+ int cc;
+
+ addr = page_to_phys(page);
+ asm("lra %1,0(%1)\n"
+ "ipm %0\n"
+ "srl %0,28"
+ :"=d"(cc),"+a"(addr)::"cc");
+ return cc == 0;
+}
+
+#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile
new file mode 100644
index 00000000000..973bb45a8fe
--- /dev/null
+++ b/arch/s390/power/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for s390 PM support
+#
+
+obj-$(CONFIG_HIBERNATION) += suspend.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o
+obj-$(CONFIG_HIBERNATION) += swsusp_64.o
+obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c
new file mode 100644
index 00000000000..b3351eceebb
--- /dev/null
+++ b/arch/s390/power/suspend.c
@@ -0,0 +1,40 @@
+/*
+ * Suspend support specific for s390.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/pfn.h>
+#include <asm/sections.h>
+#include <asm/ipl.h>
+
+/*
+ * References to section boundaries
+ */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * check if given pfn is in the 'nosave' or in the read only NSS section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
+ >> PAGE_SHIFT;
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ if (pfn >= stext_pfn && pfn <= eshared_pfn) {
+ if (ipl_info.type == IPL_TYPE_NSS)
+ return 1;
+ } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
+ return 1;
+ return 0;
+}
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c
new file mode 100644
index 00000000000..e6a4fe9f5f2
--- /dev/null
+++ b/arch/s390/power/swsusp.c
@@ -0,0 +1,30 @@
+/*
+ * Support for suspend and resume on s390
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+
+/*
+ * save CPU registers before creating a hibernation image and before
+ * restoring the memory state from it
+ */
+void save_processor_state(void)
+{
+ /* implementation contained in the
+ * swsusp_arch_suspend function
+ */
+}
+
+/*
+ * restore the contents of CPU registers
+ */
+void restore_processor_state(void)
+{
+ /* implementation contained in the
+ * swsusp_arch_resume function
+ */
+}
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c
new file mode 100644
index 00000000000..9516a517d72
--- /dev/null
+++ b/arch/s390/power/swsusp_64.c
@@ -0,0 +1,17 @@
+/*
+ * Support for suspend and resume on s390
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+#include <asm/system.h>
+#include <linux/interrupt.h>
+
+void do_after_copyback(void)
+{
+ mb();
+}
+
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
new file mode 100644
index 00000000000..3c74e7d827c
--- /dev/null
+++ b/arch/s390/power/swsusp_asm64.S
@@ -0,0 +1,199 @@
+/*
+ * S390 64-bit swsusp implementation
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ * Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Save the register context in the absolute-0 lowcore and call swsusp_save()
+ * to create an in-memory kernel image. The context is saved in the designated
+ * "store status" memory locations (see POP).
+ * We return from this function twice: the first time during the suspend-to-disk
+ * process, and the second time via the swsusp_arch_resume() function (see
+ * below) during the resume process.
+ * This function runs with interrupts disabled.
+ */
+ .section .text
+ .align 2
+ .globl swsusp_arch_suspend
+swsusp_arch_suspend:
+ stmg %r6,%r15,__SF_GPRS(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
+ /* Switch off lowcore protection */
+ stctg %c0,%c0,__SF_EMPTY(%r15)
+ ni __SF_EMPTY+4(%r15),0xef
+ lctlg %c0,%c0,__SF_EMPTY(%r15)
+
+ /* Store prefix register on stack */
+ stpx __SF_EMPTY(%r15)
+
+ /* Setup base register for lowcore (absolute 0) */
+ llgf %r1,__SF_EMPTY(%r15)
+
+ /* Get pointer to save area */
+ aghi %r1,0x1000
+
+ /* Store registers */
+ mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
+ stfpc 0x31c(%r1) /* store fpu control */
+ std 0,0x200(%r1) /* store f0 */
+ std 1,0x208(%r1) /* store f1 */
+ std 2,0x210(%r1) /* store f2 */
+ std 3,0x218(%r1) /* store f3 */
+ std 4,0x220(%r1) /* store f4 */
+ std 5,0x228(%r1) /* store f5 */
+ std 6,0x230(%r1) /* store f6 */
+ std 7,0x238(%r1) /* store f7 */
+ std 8,0x240(%r1) /* store f8 */
+ std 9,0x248(%r1) /* store f9 */
+ std 10,0x250(%r1) /* store f10 */
+ std 11,0x258(%r1) /* store f11 */
+ std 12,0x260(%r1) /* store f12 */
+ std 13,0x268(%r1) /* store f13 */
+ std 14,0x270(%r1) /* store f14 */
+ std 15,0x278(%r1) /* store f15 */
+ stam %a0,%a15,0x340(%r1) /* store access registers */
+ stctg %c0,%c15,0x380(%r1) /* store control registers */
+ stmg %r0,%r15,0x280(%r1) /* store general registers */
+
+ stpt 0x328(%r1) /* store timer */
+ stckc 0x330(%r1) /* store clock comparator */
+
+ /* Activate DAT */
+ stosm __SF_EMPTY(%r15),0x04
+
+ /* Set prefix page to zero */
+ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+ spx __SF_EMPTY(%r15)
+
+ /* Setup lowcore */
+ brasl %r14,setup_lowcore_early
+
+ /* Save image */
+ brasl %r14,swsusp_save
+
+ /* Switch on lowcore protection */
+ stctg %c0,%c0,__SF_EMPTY(%r15)
+ oi __SF_EMPTY+4(%r15),0x10
+ lctlg %c0,%c0,__SF_EMPTY(%r15)
+
+ /* Restore prefix register and return */
+ lghi %r1,0x1000
+ spx 0x318(%r1)
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+ br %r14
+
+/*
+ * Restore the saved memory image to its correct place and restore the register
+ * context. Then we return to the function that called swsusp_arch_suspend().
+ * swsusp_arch_resume() runs with interrupts disabled.
+ */
+ .globl swsusp_arch_resume
+swsusp_arch_resume:
+ stmg %r6,%r15,__SF_GPRS(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+ /* Save boot cpu number */
+ brasl %r14,smp_get_phys_cpu_id
+ lgr %r10,%r2
+
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
+ /* Switch off lowcore protection */
+ stctg %c0,%c0,__SF_EMPTY(%r15)
+ ni __SF_EMPTY+4(%r15),0xef
+ lctlg %c0,%c0,__SF_EMPTY(%r15)
+
+ /* Set prefix page to zero */
+ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+ spx __SF_EMPTY(%r15)
+
+ /* Restore saved image */
+ larl %r1,restore_pblist
+ lg %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 2f
+0:
+ lg %r2,8(%r1)
+ lg %r4,0(%r1)
+ lghi %r3,PAGE_SIZE
+ lghi %r5,PAGE_SIZE
+1:
+ mvcle %r2,%r4,0
+ jo 1b
+ lg %r1,16(%r1)
+ ltgr %r1,%r1
+ jnz 0b
+2:
+ ptlb /* flush tlb */
+
+ /* Restore registers */
+ lghi %r13,0x1000 /* %r13 = pointer to save area */
+
+ spt 0x328(%r13) /* reprogram timer */
+ //sckc 0x330(%r13) /* set clock comparator */
+
+ lctlg %c0,%c15,0x380(%r13) /* load control registers */
+ lam %a0,%a15,0x340(%r13) /* load access registers */
+
+ lfpc 0x31c(%r13) /* load fpu control */
+ ld 0,0x200(%r13) /* load f0 */
+ ld 1,0x208(%r13) /* load f1 */
+ ld 2,0x210(%r13) /* load f2 */
+ ld 3,0x218(%r13) /* load f3 */
+ ld 4,0x220(%r13) /* load f4 */
+ ld 5,0x228(%r13) /* load f5 */
+ ld 6,0x230(%r13) /* load f6 */
+ ld 7,0x238(%r13) /* load f7 */
+ ld 8,0x240(%r13) /* load f8 */
+ ld 9,0x248(%r13) /* load f9 */
+ ld 10,0x250(%r13) /* load f10 */
+ ld 11,0x258(%r13) /* load f11 */
+ ld 12,0x260(%r13) /* load f12 */
+ ld 13,0x268(%r13) /* load f13 */
+ ld 14,0x270(%r13) /* load f14 */
+ ld 15,0x278(%r13) /* load f15 */
+
+ /* Load old stack */
+ lg %r15,0x2f8(%r13)
+
+ /* Pointer to save area */
+ lghi %r13,0x1000
+
+ /* Switch CPUs */
+ lgr %r2,%r10 /* get cpu id */
+ llgf %r3,0x318(%r13)
+ brasl %r14,smp_switch_boot_cpu_in_resume
+
+ /* Restore prefix register */
+ spx 0x318(%r13)
+
+ /* Switch on lowcore protection */
+ stctg %c0,%c0,__SF_EMPTY(%r15)
+ oi __SF_EMPTY+4(%r15),0x10
+ lctlg %c0,%c0,__SF_EMPTY(%r15)
+
+ /* Activate DAT */
+ stosm __SF_EMPTY(%r15),0x04
+
+ /* Return 0 */
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+ br %r14
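As the comments note, swsusp_arch_suspend() behaves much like setjmp(): it returns a first time after the image has been created and a second time after swsusp_arch_resume() has copied the image back. A hedged sketch of that calling pattern on the generic side (snapshot_sketch is a hypothetical name, not the real hibernation core):

static int snapshot_sketch(void)
{
	int error;

	local_irq_disable();			/* both entry points run with IRQs off */
	error = swsusp_arch_suspend();		/* first return: image has been saved */
	/* the second return lands here after swsusp_arch_resume() */
	local_irq_enable();
	return error;
}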
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cc12cd48bbc..3f8b6a92eab 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,8 @@ config SPARC64
select HAVE_KPROBES
select HAVE_LMB
select HAVE_SYSCALL_WRAPPERS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
select USE_GENERIC_SMP_HELPERS if SMP
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
@@ -93,6 +95,9 @@ config AUDIT_ARCH
config HAVE_SETUP_PER_CPU_AREA
def_bool y if SPARC64
+config HAVE_DYNAMIC_PER_CPU_AREA
+ def_bool y if SPARC64
+
config GENERIC_HARDIRQS_NO__DO_IRQ
bool
def_bool y if SPARC64
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index b5d63bd8716..0123a4c596c 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
-# Fri Apr 17 02:03:07 2009
+# Linux kernel version: 2.6.30
+# Tue Jun 16 04:59:36 2009
#
CONFIG_64BIT=y
CONFIG_SPARC=y
@@ -19,6 +19,7 @@ CONFIG_LOCKDEP_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_AUDIT_ARCH=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_MMU=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
@@ -82,7 +83,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -95,16 +95,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Performance Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-# CONFIG_MARKERS is not set
+CONFIG_MARKERS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
@@ -202,6 +207,7 @@ CONFIG_NR_QUICK=1
CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y
# CONFIG_PREEMPT_NONE is not set
@@ -321,6 +327,7 @@ CONFIG_VLAN_8021Q=m
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -340,7 +347,11 @@ CONFIG_WIRELESS=y
CONFIG_WIRELESS_OLD_REGULATORY=y
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
-# CONFIG_MAC80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -364,6 +375,7 @@ CONFIG_EXTRA_FIRMWARE=""
CONFIG_CONNECTOR=m
# CONFIG_MTD is not set
CONFIG_OF_DEVICE=y
+CONFIG_OF_MDIO=m
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
@@ -399,6 +411,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_EEPROM_AT24 is not set
# CONFIG_EEPROM_LEGACY is not set
# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
CONFIG_IDE=y
@@ -477,10 +490,6 @@ CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
@@ -499,6 +508,7 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -507,6 +517,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
@@ -521,7 +532,6 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
@@ -569,7 +579,6 @@ CONFIG_DM_ZERO=m
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -635,6 +644,7 @@ CONFIG_NET_PCI=y
# CONFIG_SMSC9420 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
@@ -1127,6 +1137,11 @@ CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_MPU401_UART=m
CONFIG_SND_AC97_CODEC=m
CONFIG_SND_DRIVERS=y
@@ -1153,6 +1168,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CTXFI is not set
# CONFIG_SND_DARLA20 is not set
# CONFIG_SND_GINA20 is not set
# CONFIG_SND_LAYLA20 is not set
@@ -1183,6 +1199,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_INTEL8X0 is not set
# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
# CONFIG_SND_MAESTRO3 is not set
# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
@@ -1229,6 +1246,7 @@ CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
# CONFIG_DRAGONRISE_FF is not set
CONFIG_HID_EZKEY=y
CONFIG_HID_KYE=y
@@ -1246,9 +1264,14 @@ CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=y
+# CONFIG_SMARTJOYPLUS_FF is not set
CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
# CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_ZEROPLUS=y
# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
@@ -1462,6 +1485,7 @@ CONFIG_FILE_LOCKING=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1636,25 +1660,28 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a11b89ee9ef..926397d345f 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -6,9 +6,6 @@
#ifndef _SPARC64_CPUDATA_H
#define _SPARC64_CPUDATA_H
-#include <asm/hypervisor.h>
-#include <asm/asi.h>
-
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
@@ -38,202 +35,10 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() __get_cpu_var(__cpu_data)
-/* Trap handling code needs to get at a few critical values upon
- * trap entry and to process TSB misses. These cannot be in the
- * per_cpu() area as we really need to lock them into the TLB and
- * thus make them part of the main kernel image. As a result we
- * try to make this as small as possible.
- *
- * This is padded out and aligned to 64-bytes to avoid false sharing
- * on SMP.
- */
-
-/* If you modify the size of this structure, please update
- * TRAP_BLOCK_SZ_SHIFT below.
- */
-struct thread_info;
-struct trap_per_cpu {
-/* D-cache line 1: Basic thread information, cpu and device mondo queues */
- struct thread_info *thread;
- unsigned long pgd_paddr;
- unsigned long cpu_mondo_pa;
- unsigned long dev_mondo_pa;
-
-/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
- unsigned long resum_mondo_pa;
- unsigned long resum_kernel_buf_pa;
- unsigned long nonresum_mondo_pa;
- unsigned long nonresum_kernel_buf_pa;
-
-/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
- struct hv_fault_status fault_info;
-
-/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
- unsigned long cpu_mondo_block_pa;
- unsigned long cpu_list_pa;
- unsigned long tsb_huge;
- unsigned long tsb_huge_temp;
-
-/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
- unsigned long irq_worklist_pa;
- unsigned int cpu_mondo_qmask;
- unsigned int dev_mondo_qmask;
- unsigned int resum_qmask;
- unsigned int nonresum_qmask;
- void *hdesc;
-} __attribute__((aligned(64)));
-extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
-extern int ncpus_probed;
extern const struct seq_operations cpuinfo_op;
-extern unsigned long real_hard_smp_processor_id(void);
-
-struct cpuid_patch_entry {
- unsigned int addr;
- unsigned int cheetah_safari[4];
- unsigned int cheetah_jbus[4];
- unsigned int starfire[4];
- unsigned int sun4v[4];
-};
-extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
-
-struct sun4v_1insn_patch_entry {
- unsigned int addr;
- unsigned int insn;
-};
-extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
- __sun4v_1insn_patch_end;
-
-struct sun4v_2insn_patch_entry {
- unsigned int addr;
- unsigned int insns[2];
-};
-extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
- __sun4v_2insn_patch_end;
-
#endif /* !(__ASSEMBLY__) */
-#define TRAP_PER_CPU_THREAD 0x00
-#define TRAP_PER_CPU_PGD_PADDR 0x08
-#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
-#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
-#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
-#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
-#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
-#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
-#define TRAP_PER_CPU_FAULT_INFO 0x40
-#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
-#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
-#define TRAP_PER_CPU_TSB_HUGE 0xd0
-#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
-#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
-#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
-#define TRAP_PER_CPU_RESUM_QMASK 0xf0
-#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
-
-#define TRAP_BLOCK_SZ_SHIFT 8
-
-#include <asm/scratchpad.h>
-
-#define __GET_CPUID(REG) \
- /* Spitfire implementation (default). */ \
-661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x1f, REG; \
- nop; \
- .section .cpuid_patch, "ax"; \
- /* Instruction location. */ \
- .word 661b; \
- /* Cheetah Safari implementation. */ \
- ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x3ff, REG; \
- nop; \
- /* Cheetah JBUS implementation. */ \
- ldxa [%g0] ASI_JBUS_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x1f, REG; \
- nop; \
- /* Starfire implementation. */ \
- sethi %hi(0x1fff40000d0 >> 9), REG; \
- sllx REG, 9, REG; \
- or REG, 0xd0, REG; \
- lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
- /* sun4v implementation. */ \
- mov SCRATCHPAD_CPUID, REG; \
- ldxa [REG] ASI_SCRATCHPAD, REG; \
- nop; \
- nop; \
- .previous;
-
-#ifdef CONFIG_SMP
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- __GET_CPUID(TMP) \
- sethi %hi(trap_block), DEST; \
- sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
- or DEST, %lo(trap_block), DEST; \
- add DEST, TMP, DEST; \
-
-/* Clobbers TMP, current address space PGD phys address into DEST. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-/* Clobbers TMP, loads DEST with current thread info pointer. */
-#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* Given the current thread info pointer in THR, load the per-cpu
- * area base of the current processor into DEST. REG1, REG2, and REG3 are
- * clobbered.
- *
- * You absolutely cannot use DEST as a temporary in this code. The
- * reason is that traps can happen during execution, and return from
- * trap will load the fully resolved DEST per-cpu base. This can corrupt
- * the calculations done by the macro mid-stream.
- */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
- lduh [THR + TI_CPU], REG1; \
- sethi %hi(__per_cpu_shift), REG3; \
- sethi %hi(__per_cpu_base), REG2; \
- ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
- ldx [REG2 + %lo(__per_cpu_base)], REG2; \
- sllx REG1, REG3, REG3; \
- add REG3, REG2, DEST;
-
-#else
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- sethi %hi(trap_block), DEST; \
- or DEST, %lo(trap_block), DEST; \
-
-/* Uniprocessor versions, we know the cpuid is zero. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* No per-cpu areas on uniprocessor, so no need to load DEST. */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
-
-#endif /* !(CONFIG_SMP) */
+#include <asm/trap_block.h>
#endif /* _SPARC64_CPUDATA_H */
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 0f4150e2661..204e4bf6443 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -1,8 +1,166 @@
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/dma-mapping_64.h>
-#else
-#include <asm/dma-mapping_32.h>
-#endif
+
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h) (1)
+
+struct dma_ops {
+ void *(*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle);
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+ int nhwentries,
+ enum dma_data_direction direction);
+ void (*sync_single_for_cpu)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+ int nelems,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_device)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+};
+extern const struct dma_ops *dma_ops;
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+ (unsigned long)cpu_addr & ~PAGE_MASK, size,
+ direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->unmap_page(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_ops->map_page(dev, page, offset, size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->unmap_page(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ dma_ops->unmap_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ if (dma_ops->sync_single_for_device)
+ dma_ops->sync_single_for_device(dev, dma_handle, size,
+ direction);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ if (dma_ops->sync_sg_for_device)
+ dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+}
+
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return (dma_addr == DMA_ERROR_CODE);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+ /*
+ * no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe
+ */
+ return (1 << INTERNODE_CACHE_SHIFT);
+}
+
#endif
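Each wrapper above simply dispatches through the platform's dma_ops table, so drivers keep using the generic DMA API unchanged. A hedged usage sketch (map_rx_buffer is a hypothetical driver helper, not part of this patch):

static dma_addr_t map_rx_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, handle))
		return DMA_ERROR_CODE;	/* caller must check before using it */
	return handle;
}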
diff --git a/arch/sparc/include/asm/dma-mapping_32.h b/arch/sparc/include/asm/dma-mapping_32.h
deleted file mode 100644
index 8a57ea0573e..00000000000
--- a/arch/sparc/include/asm/dma-mapping_32.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_SPARC_DMA_MAPPING_H
-#define _ASM_SPARC_DMA_MAPPING_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct page;
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_get_cache_alignment(void);
-
-#define dma_alloc_noncoherent dma_alloc_coherent
-#define dma_free_noncoherent dma_free_coherent
-
-#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h
deleted file mode 100644
index bfa64f9702d..00000000000
--- a/arch/sparc/include/asm/dma-mapping_64.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifndef _ASM_SPARC64_DMA_MAPPING_H
-#define _ASM_SPARC64_DMA_MAPPING_H
-
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-struct dma_ops {
- void *(*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nhwentries,
- enum dma_data_direction direction);
- void (*sync_single_for_cpu)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
- void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
- int nelems,
- enum dma_data_direction direction);
-};
-extern const struct dma_ops *dma_ops;
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- return dma_ops->map_single(dev, page_address(page) + offset,
- size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->unmap_single(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-
-static inline int dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all processors, so return
- * the maximum possible, to be safe */
- return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
-
-#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index d27716cd38c..b0f18e9893d 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -11,4 +11,15 @@ extern void _mcount(void);
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* relocation of the mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
#endif /* _ASM_SPARC64_FTRACE */
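Some architectures must offset the recorded mcount call-site address before patching it; on sparc64 the recorded address is already the instruction to modify, so ftrace_call_adjust() is the identity. A hedged illustration, assuming a struct dyn_ftrace record as used by the generic ftrace code (patch_site is a hypothetical helper):

static unsigned long patch_site(struct dyn_ftrace *rec)
{
	return ftrace_call_adjust(rec->ip);	/* identical to rec->ip on sparc64 */
}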
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index 1acc7272e53..9faa046713f 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -71,7 +71,8 @@ struct mdesc_notifier_client {
extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
-extern void mdesc_fill_in_cpu_data(cpumask_t mask);
+extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
+extern void mdesc_populate_present_mask(cpumask_t *mask);
extern void sun4v_mdesc_init(void);
diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h
index bee64593023..007aafb4ae9 100644
--- a/arch/sparc/include/asm/percpu_64.h
+++ b/arch/sparc/include/asm/percpu_64.h
@@ -7,20 +7,16 @@ register unsigned long __local_per_cpu_offset asm("g5");
#ifdef CONFIG_SMP
-extern void real_setup_per_cpu_areas(void);
+#include <asm/trap_block.h>
-extern unsigned long __per_cpu_base;
-extern unsigned long __per_cpu_shift;
#define __per_cpu_offset(__cpu) \
- (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
+ (trap_block[(__cpu)].__per_cpu_base)
#define per_cpu_offset(x) (__per_cpu_offset(x))
#define __my_cpu_offset __local_per_cpu_offset
#else /* ! SMP */
-#define real_setup_per_cpu_areas() do { } while (0)
-
#endif /* SMP */
#include <asm-generic/percpu.h>
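After this change the per-cpu offset is read straight out of the owning CPU's trap_block entry instead of being computed from a global base and shift (the field is added in trap_block.h later in this patch). A hedged one-function illustration using only the definitions introduced here (percpu_offset_of is a hypothetical name):

static unsigned long percpu_offset_of(int cpu)
{
	/* equivalent to __per_cpu_offset(cpu) as defined above */
	return trap_block[cpu].__per_cpu_base;
}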
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 900d44714f8..be8d7aaeb60 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -86,6 +86,8 @@ extern int of_node_to_nid(struct device_node *dp);
#endif
extern void prom_build_devicetree(void);
+extern void of_populate_present_mask(void);
+extern void of_fill_in_cpu_data(void);
/* Dummy ref counting routines - to be implemented later */
static inline struct device_node *of_node_get(struct device_node *node)
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
new file mode 100644
index 00000000000..7e26b2db621
--- /dev/null
+++ b/arch/sparc/include/asm/trap_block.h
@@ -0,0 +1,207 @@
+#ifndef _SPARC_TRAP_BLOCK_H
+#define _SPARC_TRAP_BLOCK_H
+
+#include <asm/hypervisor.h>
+#include <asm/asi.h>
+
+#ifndef __ASSEMBLY__
+
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses. These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image. As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64-bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
+ struct thread_info *thread;
+ unsigned long pgd_paddr;
+ unsigned long cpu_mondo_pa;
+ unsigned long dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
+ unsigned long resum_mondo_pa;
+ unsigned long resum_kernel_buf_pa;
+ unsigned long nonresum_mondo_pa;
+ unsigned long nonresum_kernel_buf_pa;
+
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
+ struct hv_fault_status fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
+ unsigned long cpu_mondo_block_pa;
+ unsigned long cpu_list_pa;
+ unsigned long tsb_huge;
+ unsigned long tsb_huge_temp;
+
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
+ unsigned long irq_worklist_pa;
+ unsigned int cpu_mondo_qmask;
+ unsigned int dev_mondo_qmask;
+ unsigned int resum_qmask;
+ unsigned int nonresum_qmask;
+ unsigned long __per_cpu_base;
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(struct thread_info *);
+extern void setup_tba(void);
+extern int ncpus_probed;
+
+extern unsigned long real_hard_smp_processor_id(void);
+
+struct cpuid_patch_entry {
+ unsigned int addr;
+ unsigned int cheetah_safari[4];
+ unsigned int cheetah_jbus[4];
+ unsigned int starfire[4];
+ unsigned int sun4v[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
+struct sun4v_1insn_patch_entry {
+ unsigned int addr;
+ unsigned int insn;
+};
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+ __sun4v_1insn_patch_end;
+
+struct sun4v_2insn_patch_entry {
+ unsigned int addr;
+ unsigned int insns[2];
+};
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+ __sun4v_2insn_patch_end;
+
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD 0x00
+#define TRAP_PER_CPU_PGD_PADDR 0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
+#define TRAP_PER_CPU_FAULT_INFO 0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
+#define TRAP_PER_CPU_TSB_HUGE 0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
+#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
+#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
+#define TRAP_PER_CPU_RESUM_QMASK 0xf0
+#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
+#define TRAP_PER_CPU_PER_CPU_BASE 0xf8
+
+#define TRAP_BLOCK_SZ_SHIFT 8
+
+#include <asm/scratchpad.h>
+
+#define __GET_CPUID(REG) \
+ /* Spitfire implementation (default). */ \
+661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x1f, REG; \
+ nop; \
+ .section .cpuid_patch, "ax"; \
+ /* Instruction location. */ \
+ .word 661b; \
+ /* Cheetah Safari implementation. */ \
+ ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x3ff, REG; \
+ nop; \
+ /* Cheetah JBUS implementation. */ \
+ ldxa [%g0] ASI_JBUS_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x1f, REG; \
+ nop; \
+ /* Starfire implementation. */ \
+ sethi %hi(0x1fff40000d0 >> 9), REG; \
+ sllx REG, 9, REG; \
+ or REG, 0xd0, REG; \
+ lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
+ /* sun4v implementation. */ \
+ mov SCRATCHPAD_CPUID, REG; \
+ ldxa [REG] ASI_SCRATCHPAD, REG; \
+ nop; \
+ nop; \
+ .previous;
+
+#ifdef CONFIG_SMP
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ __GET_CPUID(TMP) \
+ sethi %hi(trap_block), DEST; \
+ sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
+ or DEST, %lo(trap_block), DEST; \
+ add DEST, TMP, DEST; \
+
+/* Clobbers TMP, current address space PGD phys address into DEST. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer. */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST. REG1, REG2, and REG3 are
+ * clobbered.
+ *
+ * You absolutely cannot use DEST as a temporary in this code. The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved DEST per-cpu base. This can corrupt
+ * the calculations done by the macro mid-stream.
+ */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
+ lduh [THR + TI_CPU], REG1; \
+ sethi %hi(trap_block), REG2; \
+ sllx REG1, TRAP_BLOCK_SZ_SHIFT, REG1; \
+ or REG2, %lo(trap_block), REG2; \
+ add REG2, REG1, REG2; \
+ ldx [REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST;
+
+#else
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ sethi %hi(trap_block), DEST; \
+ or DEST, %lo(trap_block), DEST; \
+
+/* Uniprocessor versions, we know the cpuid is zero. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* No per-cpu areas on uniprocessor, so no need to load DEST. */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
+
+#endif /* !(CONFIG_SMP) */
+
+#endif /* _SPARC_TRAP_BLOCK_H */
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b8eb71ef316..b2c406de7d4 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -394,8 +394,9 @@
#define __NR_accept4 323
#define __NR_preadv 324
#define __NR_pwritev 325
+#define __NR_rt_tgsigqueueinfo 326
-#define NR_SYSCALLS 326
+#define NR_SYSCALLS 327
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 54742e58831..475ce4696ac 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -37,6 +37,7 @@ obj-y += una_asm_$(BITS).o
obj-$(CONFIG_SPARC32) += muldiv.o
obj-y += prom_common.o
obj-y += prom_$(BITS).o
+obj-y += of_device_common.o
obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_SPARC64) += sstate.o
obj-$(CONFIG_SPARC64) += mdesc.o
obj-$(CONFIG_SPARC64) += pcr.o
obj-$(CONFIG_SPARC64) += nmi.o
+obj-$(CONFIG_SPARC64_SMP) += cpumap.o
# sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
obj-$(CONFIG_SPARC32) += devres.o
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
new file mode 100644
index 00000000000..7430ed080b2
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.c
@@ -0,0 +1,431 @@
+/* cpumap.c: used for optimizing CPU assignment
+ *
+ * Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <asm/cpudata.h>
+#include "cpumap.h"
+
+
+enum {
+ CPUINFO_LVL_ROOT = 0,
+ CPUINFO_LVL_NODE,
+ CPUINFO_LVL_CORE,
+ CPUINFO_LVL_PROC,
+ CPUINFO_LVL_MAX,
+};
+
+enum {
+ ROVER_NO_OP = 0,
+ /* Increment rover every time level is visited */
+ ROVER_INC_ON_VISIT = 1 << 0,
+ /* Increment parent's rover every time rover wraps around */
+ ROVER_INC_PARENT_ON_LOOP = 1 << 1,
+};
+
+struct cpuinfo_node {
+ int id;
+ int level;
+ int num_cpus; /* Number of CPUs in this hierarchy */
+ int parent_index;
+ int child_start; /* Array index of the first child node */
+ int child_end; /* Array index of the last child node */
+ int rover; /* Child node iterator */
+};
+
+struct cpuinfo_level {
+ int start_index; /* Index of first node of a level in a cpuinfo tree */
+ int end_index; /* Index of last node of a level in a cpuinfo tree */
+ int num_nodes; /* Number of nodes in a level in a cpuinfo tree */
+};
+
+struct cpuinfo_tree {
+ int total_nodes;
+
+ /* Offsets into nodes[] for each level of the tree */
+ struct cpuinfo_level level[CPUINFO_LVL_MAX];
+ struct cpuinfo_node nodes[0];
+};
+
+
+static struct cpuinfo_tree *cpuinfo_tree;
+
+static u16 cpu_distribution_map[NR_CPUS];
+static DEFINE_SPINLOCK(cpu_map_lock);
+
+
+/* Niagara optimized cpuinfo tree traversal. */
+static const int niagara_iterate_method[] = {
+ [CPUINFO_LVL_ROOT] = ROVER_NO_OP,
+
+ /* Strands (or virtual CPUs) within a core may not run concurrently
+ * on the Niagara, as instruction pipeline(s) are shared. Distribute
+ * work to strands in different cores first for better concurrency.
+ * Go to next NUMA node when all cores are used.
+ */
+ [CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+
+ /* Strands are grouped together by proc_id in cpuinfo_sparc, i.e.
+ * a proc_id represents an instruction pipeline. Distribute work to
+ * strands in different proc_id groups if the core has multiple
+ * instruction pipelines (e.g. the Niagara 2/2+ has two).
+ */
+ [CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT,
+
+ /* Pick the next strand in the proc_id group. */
+ [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT,
+};
+
+/* Generic cpuinfo tree traversal. Distribute work round robin across NUMA
+ * nodes.
+ */
+static const int generic_iterate_method[] = {
+ [CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT,
+ [CPUINFO_LVL_NODE] = ROVER_NO_OP,
+ [CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP,
+ [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+};
+
+
+static int cpuinfo_id(int cpu, int level)
+{
+ int id;
+
+ switch (level) {
+ case CPUINFO_LVL_ROOT:
+ id = 0;
+ break;
+ case CPUINFO_LVL_NODE:
+ id = cpu_to_node(cpu);
+ break;
+ case CPUINFO_LVL_CORE:
+ id = cpu_data(cpu).core_id;
+ break;
+ case CPUINFO_LVL_PROC:
+ id = cpu_data(cpu).proc_id;
+ break;
+ default:
+ id = -EINVAL;
+ }
+ return id;
+}
+
+/*
+ * Enumerate the CPU information in __cpu_data to determine the start index,
+ * end index, and number of nodes for each level in the cpuinfo tree. The
+ * total number of cpuinfo nodes required to build the tree is returned.
+ */
+static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
+{
+ int prev_id[CPUINFO_LVL_MAX];
+ int i, n, num_nodes;
+
+ for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) {
+ struct cpuinfo_level *lv = &tree_level[i];
+
+ prev_id[i] = -1;
+ lv->start_index = lv->end_index = lv->num_nodes = 0;
+ }
+
+ num_nodes = 1; /* Include the root node */
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (!cpu_online(i))
+ continue;
+
+ n = cpuinfo_id(i, CPUINFO_LVL_NODE);
+ if (n > prev_id[CPUINFO_LVL_NODE]) {
+ tree_level[CPUINFO_LVL_NODE].num_nodes++;
+ prev_id[CPUINFO_LVL_NODE] = n;
+ num_nodes++;
+ }
+ n = cpuinfo_id(i, CPUINFO_LVL_CORE);
+ if (n > prev_id[CPUINFO_LVL_CORE]) {
+ tree_level[CPUINFO_LVL_CORE].num_nodes++;
+ prev_id[CPUINFO_LVL_CORE] = n;
+ num_nodes++;
+ }
+ n = cpuinfo_id(i, CPUINFO_LVL_PROC);
+ if (n > prev_id[CPUINFO_LVL_PROC]) {
+ tree_level[CPUINFO_LVL_PROC].num_nodes++;
+ prev_id[CPUINFO_LVL_PROC] = n;
+ num_nodes++;
+ }
+ }
+
+ tree_level[CPUINFO_LVL_ROOT].num_nodes = 1;
+
+ n = tree_level[CPUINFO_LVL_NODE].num_nodes;
+ tree_level[CPUINFO_LVL_NODE].start_index = 1;
+ tree_level[CPUINFO_LVL_NODE].end_index = n;
+
+ n++;
+ tree_level[CPUINFO_LVL_CORE].start_index = n;
+ n += tree_level[CPUINFO_LVL_CORE].num_nodes;
+ tree_level[CPUINFO_LVL_CORE].end_index = n - 1;
+
+ tree_level[CPUINFO_LVL_PROC].start_index = n;
+ n += tree_level[CPUINFO_LVL_PROC].num_nodes;
+ tree_level[CPUINFO_LVL_PROC].end_index = n - 1;
+
+ return num_nodes;
+}
+
+/* Build a tree representation of the CPU hierarchy using the per CPU
+ * information in __cpu_data. Entries in __cpu_data[0..NR_CPUS] are
+ * assumed to be sorted in ascending order based on node, core_id, and
+ * proc_id (in order of significance).
+ */
+static struct cpuinfo_tree *build_cpuinfo_tree(void)
+{
+ struct cpuinfo_tree *new_tree;
+ struct cpuinfo_node *node;
+ struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX];
+ int num_cpus[CPUINFO_LVL_MAX];
+ int level_rover[CPUINFO_LVL_MAX];
+ int prev_id[CPUINFO_LVL_MAX];
+ int n, id, cpu, prev_cpu, last_cpu, level;
+
+ n = enumerate_cpuinfo_nodes(tmp_level);
+
+ new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
+ (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
+ if (!new_tree)
+ return NULL;
+
+ new_tree->total_nodes = n;
+ memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
+
+ prev_cpu = cpu = first_cpu(cpu_online_map);
+
+ /* Initialize all levels in the tree with the first CPU */
+ for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
+ n = new_tree->level[level].start_index;
+
+ level_rover[level] = n;
+ node = &new_tree->nodes[n];
+
+ id = cpuinfo_id(cpu, level);
+ if (unlikely(id < 0)) {
+ kfree(new_tree);
+ return NULL;
+ }
+ node->id = id;
+ node->level = level;
+ node->num_cpus = 1;
+
+ node->parent_index = (level > CPUINFO_LVL_ROOT)
+ ? new_tree->level[level - 1].start_index : -1;
+
+ node->child_start = node->child_end = node->rover =
+ (level == CPUINFO_LVL_PROC)
+ ? cpu : new_tree->level[level + 1].start_index;
+
+ prev_id[level] = node->id;
+ num_cpus[level] = 1;
+ }
+
+ for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
+ if (cpu_online(last_cpu))
+ break;
+ }
+
+ while (++cpu <= last_cpu) {
+ if (!cpu_online(cpu))
+ continue;
+
+ for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT;
+ level--) {
+ id = cpuinfo_id(cpu, level);
+ if (unlikely(id < 0)) {
+ kfree(new_tree);
+ return NULL;
+ }
+
+ if ((id != prev_id[level]) || (cpu == last_cpu)) {
+ prev_id[level] = id;
+ node = &new_tree->nodes[level_rover[level]];
+ node->num_cpus = num_cpus[level];
+ num_cpus[level] = 1;
+
+ if (cpu == last_cpu)
+ node->num_cpus++;
+
+ /* Connect tree node to parent */
+ if (level == CPUINFO_LVL_ROOT)
+ node->parent_index = -1;
+ else
+ node->parent_index =
+ level_rover[level - 1];
+
+ if (level == CPUINFO_LVL_PROC) {
+ node->child_end =
+ (cpu == last_cpu) ? cpu : prev_cpu;
+ } else {
+ node->child_end =
+ level_rover[level + 1] - 1;
+ }
+
+ /* Initialize the next node in the same level */
+ n = ++level_rover[level];
+ if (n <= new_tree->level[level].end_index) {
+ node = &new_tree->nodes[n];
+ node->id = id;
+ node->level = level;
+
+ /* Connect node to child */
+ node->child_start = node->child_end =
+ node->rover =
+ (level == CPUINFO_LVL_PROC)
+ ? cpu : level_rover[level + 1];
+ }
+ } else
+ num_cpus[level]++;
+ }
+ prev_cpu = cpu;
+ }
+
+ return new_tree;
+}
+
+static void increment_rover(struct cpuinfo_tree *t, int node_index,
+ int root_index, const int *rover_inc_table)
+{
+ struct cpuinfo_node *node = &t->nodes[node_index];
+ int top_level, level;
+
+ top_level = t->nodes[root_index].level;
+ for (level = node->level; level >= top_level; level--) {
+ node->rover++;
+ if (node->rover <= node->child_end)
+ return;
+
+ node->rover = node->child_start;
+ /* If parent's rover does not need to be adjusted, stop here. */
+ if ((level == top_level) ||
+ !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
+ return;
+
+ node = &t->nodes[node->parent_index];
+ }
+}
+
+static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
+{
+ const int *rover_inc_table;
+ int level, new_index, index = root_index;
+
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_NIAGARA1:
+ case SUN4V_CHIP_NIAGARA2:
+ rover_inc_table = niagara_iterate_method;
+ break;
+ default:
+ rover_inc_table = generic_iterate_method;
+ }
+
+ for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
+ level++) {
+ new_index = t->nodes[index].rover;
+ if (rover_inc_table[level] & ROVER_INC_ON_VISIT)
+ increment_rover(t, index, root_index, rover_inc_table);
+
+ index = new_index;
+ }
+ return index;
+}
+
+static void _cpu_map_rebuild(void)
+{
+ int i;
+
+ if (cpuinfo_tree) {
+ kfree(cpuinfo_tree);
+ cpuinfo_tree = NULL;
+ }
+
+ cpuinfo_tree = build_cpuinfo_tree();
+ if (!cpuinfo_tree)
+ return;
+
+ /* Build CPU distribution map that spans all online CPUs. No need
+ * to check if the CPU is online, as that is done when the cpuinfo
+ * tree is being built.
+ */
+ for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
+ cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
+}
+
+/* Fallback if the cpuinfo tree could not be built. CPU mapping is linear
+ * round robin.
+ */
+static int simple_map_to_cpu(unsigned int index)
+{
+ int i, end, cpu_rover;
+
+ cpu_rover = 0;
+ end = index % num_online_cpus();
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (cpu_online(cpu_rover)) {
+ if (cpu_rover >= end)
+ return cpu_rover;
+
+ cpu_rover++;
+ }
+ }
+
+ /* Impossible, since num_online_cpus() <= num_possible_cpus() */
+ return first_cpu(cpu_online_map);
+}
+
+static int _map_to_cpu(unsigned int index)
+{
+ struct cpuinfo_node *root_node;
+
+ if (unlikely(!cpuinfo_tree)) {
+ _cpu_map_rebuild();
+ if (!cpuinfo_tree)
+ return simple_map_to_cpu(index);
+ }
+
+ root_node = &cpuinfo_tree->nodes[0];
+#ifdef CONFIG_HOTPLUG_CPU
+ if (unlikely(root_node->num_cpus != num_online_cpus())) {
+ _cpu_map_rebuild();
+ if (!cpuinfo_tree)
+ return simple_map_to_cpu(index);
+ }
+#endif
+ return cpu_distribution_map[index % root_node->num_cpus];
+}
+
+int map_to_cpu(unsigned int index)
+{
+ int mapped_cpu;
+ unsigned long flag;
+
+ spin_lock_irqsave(&cpu_map_lock, flag);
+ mapped_cpu = _map_to_cpu(index);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ while (unlikely(!cpu_online(mapped_cpu)))
+ mapped_cpu = _map_to_cpu(index);
+#endif
+ spin_unlock_irqrestore(&cpu_map_lock, flag);
+ return mapped_cpu;
+}
+EXPORT_SYMBOL(map_to_cpu);
+
+void cpu_map_rebuild(void)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&cpu_map_lock, flag);
+ _cpu_map_rebuild();
+ spin_unlock_irqrestore(&cpu_map_lock, flag);
+}
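map_to_cpu() turns an arbitrary index into an online CPU, walking the cpuinfo tree so that on Niagara consecutive indexes are spread over strands in different cores first. A hedged usage sketch (spread_items is a hypothetical caller, not part of this patch):

static void spread_items(unsigned int nr_items)
{
	unsigned int i;

	for (i = 0; i < nr_items; i++) {
		int cpu = map_to_cpu(i);	/* always returns an online CPU */
		/* bind or queue item i on 'cpu' here, e.g. with queue_work_on() */
	}
}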
diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h
new file mode 100644
index 00000000000..e639880ab86
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.h
@@ -0,0 +1,16 @@
+#ifndef _CPUMAP_H
+#define _CPUMAP_H
+
+#ifdef CONFIG_SMP
+extern void cpu_map_rebuild(void);
+extern int map_to_cpu(unsigned int index);
+#define cpu_map_init() cpu_map_rebuild()
+#else
+#define cpu_map_init() do {} while (0)
+static inline int map_to_cpu(unsigned int index)
+{
+ return raw_smp_processor_id();
+}
+#endif
+
+#endif
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index ebc8403b035..524c32f97c5 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -35,8 +35,8 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
}
EXPORT_SYMBOL(dma_set_mask);
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static void *dma32_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -44,10 +44,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
#endif
return sbus_alloc_consistent(dev, size, dma_handle);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+static void dma32_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -58,38 +57,10 @@ void dma_free_coherent(struct device *dev, size_t size,
#endif
sbus_free_consistent(dev, size, cpu_addr, dma_handle);
}
-EXPORT_SYMBOL(dma_free_coherent);
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_map_single(to_pci_dev(dev), cpu_addr,
- size, (int)direction);
-#endif
- return sbus_map_single(dev, cpu_addr, size, (int)direction);
-}
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_unmap_single(to_pci_dev(dev), dma_addr,
- size, (int)direction);
- return;
- }
-#endif
- sbus_unmap_single(dev, dma_addr, size, (int)direction);
-}
-EXPORT_SYMBOL(dma_unmap_single);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -99,10 +70,9 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
return sbus_map_single(dev, page_address(page) + offset,
size, (int)direction);
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction)
+static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -113,10 +83,9 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
#endif
sbus_unmap_single(dev, dma_address, size, (int)direction);
}
-EXPORT_SYMBOL(dma_unmap_page);
-int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -124,10 +93,9 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg,
#endif
return sbus_map_sg(dev, sg, nents, direction);
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+static void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -137,10 +105,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
#endif
sbus_unmap_sg(dev, sg, nents, (int)direction);
}
-EXPORT_SYMBOL(dma_unmap_sg);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -151,10 +119,10 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
#endif
sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void dma32_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -165,28 +133,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
#endif
sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
+static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -197,11 +146,10 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
#endif
BUG();
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void dma32_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -212,16 +160,19 @@ void dma_sync_sg_for_device(struct device *dev,
#endif
BUG();
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_get_cache_alignment(void)
-{
- return 32;
-}
-EXPORT_SYMBOL(dma_get_cache_alignment);
+static const struct dma_ops dma32_dma_ops = {
+ .alloc_coherent = dma32_alloc_coherent,
+ .free_coherent = dma32_free_coherent,
+ .map_page = dma32_map_page,
+ .unmap_page = dma32_unmap_page,
+ .map_sg = dma32_map_sg,
+ .unmap_sg = dma32_unmap_sg,
+ .sync_single_for_cpu = dma32_sync_single_for_cpu,
+ .sync_single_for_device = dma32_sync_single_for_device,
+ .sync_sg_for_cpu = dma32_sync_sg_for_cpu,
+ .sync_sg_for_device = dma32_sync_sg_for_device,
+};
+
+const struct dma_ops *dma_ops = &dma32_dma_ops;
+EXPORT_SYMBOL(dma_ops);
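
With this conversion the individual dma_* entry points disappear from dma.c and everything funnels through the exported dma32_dma_ops table. The sketch below shows the dispatch pattern only; the real inline wrappers are provided by the dma-mapping headers, not by this file, so treat the function name as illustrative:

/* Illustration of the dispatch pattern implied by the ops table above. */
static inline dma_addr_t example_dma_map_page(struct device *dev,
					      struct page *page,
					      unsigned long offset,
					      size_t size,
					      enum dma_data_direction dir)
{
	return dma_ops->map_page(dev, page, offset, size, dir);
}
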
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 90350f838f0..4a700f4b79c 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -544,7 +544,8 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
resp_len, ncpus, mask,
DR_CPU_STAT_CONFIGURED);
- mdesc_fill_in_cpu_data(*mask);
+ mdesc_populate_present_mask(mask);
+ mdesc_fill_in_cpu_data(mask);
for_each_cpu_mask(cpu, *mask) {
int err;
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d0218e73f98..d3b1a307656 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -7,14 +7,10 @@
#include <asm/ftrace.h>
+#ifdef CONFIG_DYNAMIC_FTRACE
static const u32 ftrace_nop = 0x01000000;
-unsigned char *ftrace_nop_replace(void)
-{
- return (char *)&ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static u32 call;
s32 off;
@@ -22,15 +18,11 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
off = ((s32)addr - (s32)ip);
call = 0x40000000 | ((u32)off >> 2);
- return (unsigned char *) &call;
+ return call;
}
-int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
- unsigned char *new_code)
+static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
- u32 old = *(u32 *)old_code;
- u32 new = *(u32 *)new_code;
u32 replaced;
int faulted;
@@ -59,18 +51,43 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return faulted;
}
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ u32 old, new;
+
+ old = ftrace_call_replace(ip, addr);
+ new = ftrace_nop;
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ u32 old, new;
+
+ old = ftrace_nop;
+ new = ftrace_call_replace(ip, addr);
+ return ftrace_modify_code(ip, old, new);
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned char old[MCOUNT_INSN_SIZE], *new;
+ u32 old, new;
- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+ old = *(u32 *) &ftrace_call;
new = ftrace_call_replace(ip, (unsigned long)func);
return ftrace_modify_code(ip, old, new);
}
int __init ftrace_dyn_arch_init(void *data)
{
- ftrace_mcount_set(data);
+ unsigned long *p = data;
+
+ *p = 0;
+
return 0;
}
+#endif
+
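
The rewritten helpers trade unsigned char pointers for raw u32 instruction words. On sparc a call instruction is format 1 (op bits 01) with a signed 30-bit word displacement, hence the 0x40000000 | ((u32)off >> 2) encoding, and 0x01000000 (a sethi of 0 into %g0) serves as the nop. A worked example of the same encoding, purely illustrative:

/* Worked example of the encoding used by ftrace_call_replace() above.
 * With ip = 0x1000 and addr = 0x2000, off = 0x1000 bytes, so the
 * emitted word is 0x40000000 | (0x1000 >> 2) = 0x40000400, i.e. a
 * call with a displacement of 0x400 instruction words.
 */
static u32 example_call_insn(unsigned long ip, unsigned long addr)
{
	s32 off = (s32)addr - (s32)ip;

	return 0x40000000 | ((u32)off >> 2);
}
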
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 91bf4c7f79b..f8f21050448 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -641,28 +641,6 @@ tlb_fixup_done:
/* Not reached... */
1:
- /* If we boot on a non-zero cpu, all of the per-cpu
- * variable references we make before setting up the
- * per-cpu areas will use a bogus offset. Put a
- * compensating factor into __per_cpu_base to handle
- * this cleanly.
- *
- * What the per-cpu code calculates is:
- *
- * __per_cpu_base + (cpu << __per_cpu_shift)
- *
- * These two variables are zero initially, so to
- * make it all cancel out to zero we need to put
- * "0 - (cpu << 0)" into __per_cpu_base so that the
- * above formula evaluates to zero.
- *
- * We cannot even perform a printk() until this stuff
- * is setup as that calls cpu_clock() which uses
- * per-cpu variables.
- */
- sub %g0, %o0, %o1
- sethi %hi(__per_cpu_base), %o2
- stx %o1, [%o2 + %lo(__per_cpu_base)]
#else
mov 0, %o0
#endif
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index d8900e1d5aa..0aeaefe696b 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -351,8 +351,9 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
- enum dma_data_direction direction)
+static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t sz,
+ enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -368,7 +369,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
- oaddr = (unsigned long)ptr;
+ oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -472,8 +473,8 @@ do_flush_sync:
vaddr, ctx, npages);
}
-static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
- size_t sz, enum dma_data_direction direction)
+static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+ size_t sz, enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -824,8 +825,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
static const struct dma_ops sun4u_dma_ops = {
.alloc_coherent = dma_4u_alloc_coherent,
.free_coherent = dma_4u_free_coherent,
- .map_single = dma_4u_map_single,
- .unmap_single = dma_4u_unmap_single,
+ .map_page = dma_4u_map_page,
+ .unmap_page = dma_4u_unmap_page,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
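
dma_4u_map_single() becomes dma_4u_map_page(): the only real change is that the kernel virtual address is now derived from (page, offset) via page_address(). A single-buffer mapping is therefore just the page-based hook with the buffer decomposed first; the helper below is hypothetical and only illustrates that equivalence:

/* Hypothetical helper, for illustration only: expresses the old
 * map_single semantics in terms of the new map_page hook.
 */
static dma_addr_t example_map_single(const struct dma_ops *ops,
				     struct device *dev, void *ptr,
				     size_t sz,
				     enum dma_data_direction dir)
{
	struct page *page = virt_to_page(ptr);
	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;

	return ops->map_page(dev, page, offset, sz, dir);
}
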
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e5e78f9cfc9..bd075054942 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -45,6 +45,7 @@
#include <asm/cacheflush.h>
#include "entry.h"
+#include "cpumap.h"
#define NUM_IVECS (IMAP_INR + 1)
@@ -256,35 +257,13 @@ static int irq_choose_cpu(unsigned int virt_irq)
int cpuid;
cpumask_copy(&mask, irq_desc[virt_irq].affinity);
- if (cpus_equal(mask, CPU_MASK_ALL)) {
- static int irq_rover;
- static DEFINE_SPINLOCK(irq_rover_lock);
- unsigned long flags;
-
- /* Round-robin distribution... */
- do_round_robin:
- spin_lock_irqsave(&irq_rover_lock, flags);
-
- while (!cpu_online(irq_rover)) {
- if (++irq_rover >= nr_cpu_ids)
- irq_rover = 0;
- }
- cpuid = irq_rover;
- do {
- if (++irq_rover >= nr_cpu_ids)
- irq_rover = 0;
- } while (!cpu_online(irq_rover));
-
- spin_unlock_irqrestore(&irq_rover_lock, flags);
+ if (cpus_equal(mask, cpu_online_map)) {
+ cpuid = map_to_cpu(virt_irq);
} else {
cpumask_t tmp;
cpus_and(tmp, cpu_online_map, mask);
-
- if (cpus_empty(tmp))
- goto do_round_robin;
-
- cpuid = first_cpu(tmp);
+ cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
}
return cpuid;
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index f0e6ed23a46..938da19dc06 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -574,7 +574,7 @@ static void __init report_platform_properties(void)
mdesc_release(hp);
}
-static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
+static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
struct mdesc_handle *hp,
u64 mp)
{
@@ -619,8 +619,7 @@ static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
}
}
-static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
- int core_id)
+static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
{
u64 a;
@@ -653,7 +652,7 @@ static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
}
}
-static void __devinit set_core_ids(struct mdesc_handle *hp)
+static void __cpuinit set_core_ids(struct mdesc_handle *hp)
{
int idx;
u64 mp;
@@ -678,8 +677,7 @@ static void __devinit set_core_ids(struct mdesc_handle *hp)
}
}
-static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
- int proc_id)
+static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
u64 a;
@@ -698,8 +696,7 @@ static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
}
}
-static void __devinit __set_proc_ids(struct mdesc_handle *hp,
- const char *exec_unit_name)
+static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
int idx;
u64 mp;
@@ -720,13 +717,13 @@ static void __devinit __set_proc_ids(struct mdesc_handle *hp,
}
}
-static void __devinit set_proc_ids(struct mdesc_handle *hp)
+static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
{
__set_proc_ids(hp, "exec_unit");
__set_proc_ids(hp, "exec-unit");
}
-static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
+static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
unsigned char def)
{
u64 val;
@@ -745,7 +742,7 @@ use_default:
*mask = ((1U << def) * 64U) - 1U;
}
-static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
+static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
struct trap_per_cpu *tb)
{
const u64 *val;
@@ -763,23 +760,15 @@ static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}
-void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
+static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
struct mdesc_handle *hp = mdesc_grab();
+ void *ret = NULL;
u64 mp;
- ncpus_probed = 0;
mdesc_for_each_node_by_name(hp, mp, "cpu") {
const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
- const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
- struct trap_per_cpu *tb;
- cpuinfo_sparc *c;
- int cpuid;
- u64 a;
-
- ncpus_probed++;
-
- cpuid = *id;
+ int cpuid = *id;
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
@@ -788,62 +777,104 @@ void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
cpuid, NR_CPUS);
continue;
}
- if (!cpu_isset(cpuid, mask))
+ if (!cpu_isset(cpuid, *mask))
continue;
-#else
- /* On uniprocessor we only want the values for the
- * real physical cpu the kernel booted onto, however
- * cpu_data() only has one entry at index 0.
- */
- if (cpuid != real_hard_smp_processor_id())
- continue;
- cpuid = 0;
#endif
- c = &cpu_data(cpuid);
- c->clock_tick = *cfreq;
+ ret = func(hp, mp, cpuid, arg);
+ if (ret)
+ goto out;
+ }
+out:
+ mdesc_release(hp);
+ return ret;
+}
- tb = &trap_block[cpuid];
- get_mondo_data(hp, mp, tb);
+static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+ ncpus_probed++;
+#ifdef CONFIG_SMP
+ set_cpu_present(cpuid, true);
+#endif
+ return NULL;
+}
- mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
- u64 j, t = mdesc_arc_target(hp, a);
- const char *t_name;
+void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
+{
+ if (tlb_type != hypervisor)
+ return;
- t_name = mdesc_node_name(hp, t);
- if (!strcmp(t_name, "cache")) {
- fill_in_one_cache(c, hp, t);
- continue;
- }
+ ncpus_probed = 0;
+ mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
+}
- mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
- u64 n = mdesc_arc_target(hp, j);
- const char *n_name;
+static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+ const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
+ struct trap_per_cpu *tb;
+ cpuinfo_sparc *c;
+ u64 a;
- n_name = mdesc_node_name(hp, n);
- if (!strcmp(n_name, "cache"))
- fill_in_one_cache(c, hp, n);
- }
+#ifndef CONFIG_SMP
+ /* On uniprocessor we only want the values for the
+ * real physical cpu the kernel booted onto, however
+ * cpu_data() only has one entry at index 0.
+ */
+ if (cpuid != real_hard_smp_processor_id())
+ return NULL;
+ cpuid = 0;
+#endif
+
+ c = &cpu_data(cpuid);
+ c->clock_tick = *cfreq;
+
+ tb = &trap_block[cpuid];
+ get_mondo_data(hp, mp, tb);
+
+ mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+ u64 j, t = mdesc_arc_target(hp, a);
+ const char *t_name;
+
+ t_name = mdesc_node_name(hp, t);
+ if (!strcmp(t_name, "cache")) {
+ fill_in_one_cache(c, hp, t);
+ continue;
}
-#ifdef CONFIG_SMP
- cpu_set(cpuid, cpu_present_map);
-#endif
+ mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
+ u64 n = mdesc_arc_target(hp, j);
+ const char *n_name;
- c->core_id = 0;
- c->proc_id = -1;
+ n_name = mdesc_node_name(hp, n);
+ if (!strcmp(n_name, "cache"))
+ fill_in_one_cache(c, hp, n);
+ }
}
+ c->core_id = 0;
+ c->proc_id = -1;
+
+ return NULL;
+}
+
+void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
+{
+ struct mdesc_handle *hp;
+
+ mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
+
#ifdef CONFIG_SMP
sparc64_multi_core = 1;
#endif
+ hp = mdesc_grab();
+
set_core_ids(hp);
set_proc_ids(hp);
- smp_fill_in_sib_core_maps();
-
mdesc_release(hp);
+
+ smp_fill_in_sib_core_maps();
}
static ssize_t mdesc_read(struct file *file, char __user *buf,
@@ -887,7 +918,6 @@ void __init sun4v_mdesc_init(void)
{
struct mdesc_handle *hp;
unsigned long len, real_len, status;
- cpumask_t mask;
(void) sun4v_mach_desc(0UL, 0UL, &len);
@@ -911,7 +941,4 @@ void __init sun4v_mdesc_init(void)
cur_mdesc = hp;
report_platform_properties();
-
- cpus_setall(mask);
- mdesc_fill_in_cpu_data(mask);
}
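
The net effect of the mdesc.c rework is a split into two passes over the machine description, both driven by mdesc_iterate_over_cpus(): mdesc_populate_present_mask() only counts CPUs and marks them present, while mdesc_fill_in_cpu_data() fills in clock, cache and mondo data. Callers such as dr_cpu_configure() above run them in that order; a sketch of the expected sequence, with the mask assumed to be prepared by the caller:

/* Sketch of the calling order, mirroring dr_cpu_configure() above:
 * first make the CPUs present, then populate their cpu_data() slots.
 */
static void example_bring_in_cpus(cpumask_t *mask)
{
	mdesc_populate_present_mask(mask);
	mdesc_fill_in_cpu_data(mask);
}
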
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index c8f14c1dc52..90396702ea2 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -6,159 +6,11 @@
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-static int node_match(struct device *dev, void *data)
-{
- struct of_device *op = to_of_device(dev);
- struct device_node *dp = data;
-
- return (op->node == dp);
-}
-
-struct of_device *of_find_device_by_node(struct device_node *dp)
-{
- struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
- dp, node_match);
-
- if (dev)
- return to_of_device(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(of_find_device_by_node);
-
-unsigned int irq_of_parse_and_map(struct device_node *node, int index)
-{
- struct of_device *op = of_find_device_by_node(node);
-
- if (!op || index >= op->num_irqs)
- return 0;
-
- return op->irqs[index];
-}
-EXPORT_SYMBOL(irq_of_parse_and_map);
-
-/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
- * BUS and propagate to all child of_device objects.
- */
-void of_propagate_archdata(struct of_device *bus)
-{
- struct dev_archdata *bus_sd = &bus->dev.archdata;
- struct device_node *bus_dp = bus->node;
- struct device_node *dp;
-
- for (dp = bus_dp->child; dp; dp = dp->sibling) {
- struct of_device *op = of_find_device_by_node(dp);
-
- op->dev.archdata.iommu = bus_sd->iommu;
- op->dev.archdata.stc = bus_sd->stc;
- op->dev.archdata.host_controller = bus_sd->host_controller;
- op->dev.archdata.numa_node = bus_sd->numa_node;
-
- if (dp->child)
- of_propagate_archdata(op);
- }
-}
-
-struct bus_type of_platform_bus_type;
-EXPORT_SYMBOL(of_platform_bus_type);
-
-static inline u64 of_read_addr(const u32 *cell, int size)
-{
- u64 r = 0;
- while (size--)
- r = (r << 32) | *(cell++);
- return r;
-}
-
-static void __init get_cells(struct device_node *dp,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = of_n_addr_cells(dp);
- if (sizec)
- *sizec = of_n_size_cells(dp);
-}
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-
-struct of_bus {
- const char *name;
- const char *addr_prop_name;
- int (*match)(struct device_node *parent);
- void (*count_cells)(struct device_node *child,
- int *addrc, int *sizec);
- int (*map)(u32 *addr, const u32 *range,
- int na, int ns, int pna);
- unsigned long (*get_flags)(const u32 *addr, unsigned long);
-};
-
-/*
- * Default translator (generic bus)
- */
-
-static void of_bus_default_count_cells(struct device_node *dev,
- int *addrc, int *sizec)
-{
- get_cells(dev, addrc, sizec);
-}
-
-/* Make sure the least significant 64-bits are in-range. Even
- * for 3 or 4 cell values it is a good enough approximation.
- */
-static int of_out_of_range(const u32 *addr, const u32 *base,
- const u32 *size, int na, int ns)
-{
- u64 a = of_read_addr(addr, na);
- u64 b = of_read_addr(base, na);
-
- if (a < b)
- return 1;
-
- b += of_read_addr(size, ns);
- if (a >= b)
- return 1;
-
- return 0;
-}
-
-static int of_bus_default_map(u32 *addr, const u32 *range,
- int na, int ns, int pna)
-{
- u32 result[OF_MAX_ADDR_CELLS];
- int i;
-
- if (ns > 2) {
- printk("of_device: Cannot handle size cells (%d) > 2.", ns);
- return -EINVAL;
- }
-
- if (of_out_of_range(addr, range, range + na + pna, na, ns))
- return -EINVAL;
-
- /* Start with the parent range base. */
- memcpy(result, range + na, pna * 4);
-
- /* Add in the child address offset. */
- for (i = 0; i < na; i++)
- result[pna - 1 - i] +=
- (addr[na - 1 - i] -
- range[na - 1 - i]);
-
- memcpy(addr, result, pna * 4);
-
- return 0;
-}
-
-static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
-{
- if (flags)
- return flags;
- return IORESOURCE_MEM;
-}
+#include "of_device_common.h"
/*
* PCI bus specific translator
@@ -240,47 +92,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
return flags;
}
-/*
- * SBUS bus specific translator
- */
-
-static int of_bus_sbus_match(struct device_node *np)
-{
- struct device_node *dp = np;
-
- while (dp) {
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi"))
- return 1;
-
- /* Have a look at use_1to1_mapping(). We're trying
- * to match SBUS if that's the top-level bus and we
- * don't have some intervening real bus that provides
- * ranges based translations.
- */
- if (of_find_property(dp, "ranges", NULL) != NULL)
- break;
-
- dp = dp->parent;
- }
-
- return 0;
-}
-
-static void of_bus_sbus_count_cells(struct device_node *child,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = 2;
- if (sizec)
- *sizec = 1;
-}
-
-static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna)
-{
- return of_bus_default_map(addr, range, na, ns, pna);
-}
-
static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
{
return IORESOURCE_MEM;
@@ -307,7 +118,7 @@ static struct of_bus of_busses[] = {
.addr_prop_name = "reg",
.match = of_bus_sbus_match,
.count_cells = of_bus_sbus_count_cells,
- .map = of_bus_sbus_map,
+ .map = of_bus_default_map,
.get_flags = of_bus_sbus_get_flags,
},
/* Default */
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 5ac287ac03d..881947e59e9 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -10,6 +10,8 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include "of_device_common.h"
+
void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
{
unsigned long ret = res->start + offset;
@@ -35,156 +37,6 @@ void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
}
EXPORT_SYMBOL(of_iounmap);
-static int node_match(struct device *dev, void *data)
-{
- struct of_device *op = to_of_device(dev);
- struct device_node *dp = data;
-
- return (op->node == dp);
-}
-
-struct of_device *of_find_device_by_node(struct device_node *dp)
-{
- struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
- dp, node_match);
-
- if (dev)
- return to_of_device(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(of_find_device_by_node);
-
-unsigned int irq_of_parse_and_map(struct device_node *node, int index)
-{
- struct of_device *op = of_find_device_by_node(node);
-
- if (!op || index >= op->num_irqs)
- return 0;
-
- return op->irqs[index];
-}
-EXPORT_SYMBOL(irq_of_parse_and_map);
-
-/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
- * BUS and propagate to all child of_device objects.
- */
-void of_propagate_archdata(struct of_device *bus)
-{
- struct dev_archdata *bus_sd = &bus->dev.archdata;
- struct device_node *bus_dp = bus->node;
- struct device_node *dp;
-
- for (dp = bus_dp->child; dp; dp = dp->sibling) {
- struct of_device *op = of_find_device_by_node(dp);
-
- op->dev.archdata.iommu = bus_sd->iommu;
- op->dev.archdata.stc = bus_sd->stc;
- op->dev.archdata.host_controller = bus_sd->host_controller;
- op->dev.archdata.numa_node = bus_sd->numa_node;
-
- if (dp->child)
- of_propagate_archdata(op);
- }
-}
-
-struct bus_type of_platform_bus_type;
-EXPORT_SYMBOL(of_platform_bus_type);
-
-static inline u64 of_read_addr(const u32 *cell, int size)
-{
- u64 r = 0;
- while (size--)
- r = (r << 32) | *(cell++);
- return r;
-}
-
-static void get_cells(struct device_node *dp, int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = of_n_addr_cells(dp);
- if (sizec)
- *sizec = of_n_size_cells(dp);
-}
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-
-struct of_bus {
- const char *name;
- const char *addr_prop_name;
- int (*match)(struct device_node *parent);
- void (*count_cells)(struct device_node *child,
- int *addrc, int *sizec);
- int (*map)(u32 *addr, const u32 *range,
- int na, int ns, int pna);
- unsigned long (*get_flags)(const u32 *addr, unsigned long);
-};
-
-/*
- * Default translator (generic bus)
- */
-
-static void of_bus_default_count_cells(struct device_node *dev,
- int *addrc, int *sizec)
-{
- get_cells(dev, addrc, sizec);
-}
-
-/* Make sure the least significant 64-bits are in-range. Even
- * for 3 or 4 cell values it is a good enough approximation.
- */
-static int of_out_of_range(const u32 *addr, const u32 *base,
- const u32 *size, int na, int ns)
-{
- u64 a = of_read_addr(addr, na);
- u64 b = of_read_addr(base, na);
-
- if (a < b)
- return 1;
-
- b += of_read_addr(size, ns);
- if (a >= b)
- return 1;
-
- return 0;
-}
-
-static int of_bus_default_map(u32 *addr, const u32 *range,
- int na, int ns, int pna)
-{
- u32 result[OF_MAX_ADDR_CELLS];
- int i;
-
- if (ns > 2) {
- printk("of_device: Cannot handle size cells (%d) > 2.", ns);
- return -EINVAL;
- }
-
- if (of_out_of_range(addr, range, range + na + pna, na, ns))
- return -EINVAL;
-
- /* Start with the parent range base. */
- memcpy(result, range + na, pna * 4);
-
- /* Add in the child address offset. */
- for (i = 0; i < na; i++)
- result[pna - 1 - i] +=
- (addr[na - 1 - i] -
- range[na - 1 - i]);
-
- memcpy(addr, result, pna * 4);
-
- return 0;
-}
-
-static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
-{
- if (flags)
- return flags;
- return IORESOURCE_MEM;
-}
-
/*
* PCI bus specific translator
*/
@@ -295,42 +147,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
}
/*
- * SBUS bus specific translator
- */
-
-static int of_bus_sbus_match(struct device_node *np)
-{
- struct device_node *dp = np;
-
- while (dp) {
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi"))
- return 1;
-
- /* Have a look at use_1to1_mapping(). We're trying
- * to match SBUS if that's the top-level bus and we
- * don't have some intervening real bus that provides
- * ranges based translations.
- */
- if (of_find_property(dp, "ranges", NULL) != NULL)
- break;
-
- dp = dp->parent;
- }
-
- return 0;
-}
-
-static void of_bus_sbus_count_cells(struct device_node *child,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = 2;
- if (sizec)
- *sizec = 1;
-}
-
-/*
* FHC/Central bus specific translator.
*
* This is just needed to hard-code the address and size cell
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
new file mode 100644
index 00000000000..cb8eb799bb6
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.c
@@ -0,0 +1,174 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include "of_device_common.h"
+
+static int node_match(struct device *dev, void *data)
+{
+ struct of_device *op = to_of_device(dev);
+ struct device_node *dp = data;
+
+ return (op->node == dp);
+}
+
+struct of_device *of_find_device_by_node(struct device_node *dp)
+{
+ struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
+ dp, node_match);
+
+ if (dev)
+ return to_of_device(dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+unsigned int irq_of_parse_and_map(struct device_node *node, int index)
+{
+ struct of_device *op = of_find_device_by_node(node);
+
+ if (!op || index >= op->num_irqs)
+ return 0;
+
+ return op->irqs[index];
+}
+EXPORT_SYMBOL(irq_of_parse_and_map);
+
+/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
+ * BUS and propagate to all child of_device objects.
+ */
+void of_propagate_archdata(struct of_device *bus)
+{
+ struct dev_archdata *bus_sd = &bus->dev.archdata;
+ struct device_node *bus_dp = bus->node;
+ struct device_node *dp;
+
+ for (dp = bus_dp->child; dp; dp = dp->sibling) {
+ struct of_device *op = of_find_device_by_node(dp);
+
+ op->dev.archdata.iommu = bus_sd->iommu;
+ op->dev.archdata.stc = bus_sd->stc;
+ op->dev.archdata.host_controller = bus_sd->host_controller;
+ op->dev.archdata.numa_node = bus_sd->numa_node;
+
+ if (dp->child)
+ of_propagate_archdata(op);
+ }
+}
+
+struct bus_type of_platform_bus_type;
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static void get_cells(struct device_node *dp, int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = of_n_addr_cells(dp);
+ if (sizec)
+ *sizec = of_n_size_cells(dp);
+}
+
+/*
+ * Default translator (generic bus)
+ */
+
+void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec)
+{
+ get_cells(dev, addrc, sizec);
+}
+
+/* Make sure the least significant 64-bits are in-range. Even
+ * for 3 or 4 cell values it is a good enough approximation.
+ */
+int of_out_of_range(const u32 *addr, const u32 *base,
+ const u32 *size, int na, int ns)
+{
+ u64 a = of_read_addr(addr, na);
+ u64 b = of_read_addr(base, na);
+
+ if (a < b)
+ return 1;
+
+ b += of_read_addr(size, ns);
+ if (a >= b)
+ return 1;
+
+ return 0;
+}
+
+int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+{
+ u32 result[OF_MAX_ADDR_CELLS];
+ int i;
+
+ if (ns > 2) {
+ printk("of_device: Cannot handle size cells (%d) > 2.", ns);
+ return -EINVAL;
+ }
+
+ if (of_out_of_range(addr, range, range + na + pna, na, ns))
+ return -EINVAL;
+
+ /* Start with the parent range base. */
+ memcpy(result, range + na, pna * 4);
+
+ /* Add in the child address offset. */
+ for (i = 0; i < na; i++)
+ result[pna - 1 - i] +=
+ (addr[na - 1 - i] -
+ range[na - 1 - i]);
+
+ memcpy(addr, result, pna * 4);
+
+ return 0;
+}
+
+unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
+{
+ if (flags)
+ return flags;
+ return IORESOURCE_MEM;
+}
+
+/*
+ * SBUS bus specific translator
+ */
+
+int of_bus_sbus_match(struct device_node *np)
+{
+ struct device_node *dp = np;
+
+ while (dp) {
+ if (!strcmp(dp->name, "sbus") ||
+ !strcmp(dp->name, "sbi"))
+ return 1;
+
+ /* Have a look at use_1to1_mapping(). We're trying
+ * to match SBUS if that's the top-level bus and we
+ * don't have some intervening real bus that provides
+ * ranges based translations.
+ */
+ if (of_find_property(dp, "ranges", NULL) != NULL)
+ break;
+
+ dp = dp->parent;
+ }
+
+ return 0;
+}
+
+void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 2;
+ if (sizec)
+ *sizec = 1;
+}
diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h
new file mode 100644
index 00000000000..cdfd2399284
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.h
@@ -0,0 +1,36 @@
+#ifndef _OF_DEVICE_COMMON_H
+#define _OF_DEVICE_COMMON_H
+
+static inline u64 of_read_addr(const u32 *cell, int size)
+{
+ u64 r = 0;
+ while (size--)
+ r = (r << 32) | *(cell++);
+ return r;
+}
+
+void of_bus_default_count_cells(struct device_node *dev, int *addrc,
+ int *sizec);
+int of_out_of_range(const u32 *addr, const u32 *base,
+ const u32 *size, int na, int ns);
+int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna);
+unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags);
+
+int of_bus_sbus_match(struct device_node *np);
+void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec);
+
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+
+struct of_bus {
+ const char *name;
+ const char *addr_prop_name;
+ int (*match)(struct device_node *parent);
+ void (*count_cells)(struct device_node *child,
+ int *addrc, int *sizec);
+ int (*map)(u32 *addr, const u32 *range,
+ int na, int ns, int pna);
+ unsigned long (*get_flags)(const u32 *addr, unsigned long);
+};
+
+#endif /* _OF_DEVICE_COMMON_H */
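
of_read_addr() folds an array of 32-bit cells into one 64-bit value, most-significant cell first, and of_bus_default_map() uses it to range-check and translate a child address into the parent's space. A small worked example of the cell folding, illustrative only:

/* Worked example of of_read_addr(): the two cells {0x1, 0x80000000}
 * fold into the 64-bit address 0x1_8000_0000.
 */
#include <linux/types.h>
#include "of_device_common.h"

static u64 example_fold_cells(void)
{
	const u32 cells[2] = { 0x1, 0x80000000 };

	return of_read_addr(cells, 2);	/* == 0x180000000ULL */
}
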
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 5db5ebed35d..2485eaa2310 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -230,8 +230,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
- enum dma_data_direction direction)
+static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t sz,
+ enum dma_data_direction direction)
{
struct iommu *iommu;
unsigned long flags, npages, oaddr;
@@ -245,7 +246,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
if (unlikely(direction == DMA_NONE))
goto bad;
- oaddr = (unsigned long)ptr;
+ oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -294,8 +295,8 @@ iommu_map_fail:
return DMA_ERROR_CODE;
}
-static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
- size_t sz, enum dma_data_direction direction)
+static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+ size_t sz, enum dma_data_direction direction)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -537,8 +538,8 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev,
static const struct dma_ops sun4v_dma_ops = {
.alloc_coherent = dma_4v_alloc_coherent,
.free_coherent = dma_4v_free_coherent,
- .map_single = dma_4v_map_single,
- .unmap_single = dma_4v_unmap_single,
+ .map_page = dma_4v_map_page,
+ .unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
.sync_single_for_cpu = dma_4v_sync_single_for_cpu,
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index bb0f0fda6ca..453397fe5e1 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -22,7 +22,6 @@ static inline int is_root_node(const struct device_node *dp)
extern char *build_path_component(struct device_node *dp);
extern void of_console_init(void);
-extern void of_fill_in_cpu_data(void);
extern unsigned int prom_early_allocated;
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index ca55c7012f7..fb06ac2bd38 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -374,75 +374,26 @@ static const char *get_mid_prop(void)
return (tlb_type == spitfire ? "upa-portid" : "portid");
}
-struct device_node *of_find_node_by_cpuid(int cpuid)
-{
- struct device_node *dp;
- const char *mid_prop = get_mid_prop();
-
- for_each_node_by_type(dp, "cpu") {
- int id = of_getintprop_default(dp, mid_prop, -1);
- const char *this_mid_prop = mid_prop;
-
- if (id < 0) {
- this_mid_prop = "cpuid";
- id = of_getintprop_default(dp, this_mid_prop, -1);
- }
-
- if (id < 0) {
- prom_printf("OF: Serious problem, cpu lacks "
- "%s property", this_mid_prop);
- prom_halt();
- }
- if (cpuid == id)
- return dp;
- }
- return NULL;
-}
-
-void __init of_fill_in_cpu_data(void)
+static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg)
{
struct device_node *dp;
const char *mid_prop;
- if (tlb_type == hypervisor)
- return;
-
mid_prop = get_mid_prop();
- ncpus_probed = 0;
for_each_node_by_type(dp, "cpu") {
int cpuid = of_getintprop_default(dp, mid_prop, -1);
const char *this_mid_prop = mid_prop;
- struct device_node *portid_parent;
- int portid = -1;
+ void *ret;
- portid_parent = NULL;
if (cpuid < 0) {
this_mid_prop = "cpuid";
cpuid = of_getintprop_default(dp, this_mid_prop, -1);
- if (cpuid >= 0) {
- int limit = 2;
-
- portid_parent = dp;
- while (limit--) {
- portid_parent = portid_parent->parent;
- if (!portid_parent)
- break;
- portid = of_getintprop_default(portid_parent,
- "portid", -1);
- if (portid >= 0)
- break;
- }
- }
}
-
if (cpuid < 0) {
prom_printf("OF: Serious problem, cpu lacks "
"%s property", this_mid_prop);
prom_halt();
}
-
- ncpus_probed++;
-
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
printk(KERN_WARNING "Ignoring CPU %d which is "
@@ -450,79 +401,142 @@ void __init of_fill_in_cpu_data(void)
cpuid, NR_CPUS);
continue;
}
-#else
- /* On uniprocessor we only want the values for the
- * real physical cpu the kernel booted onto, however
- * cpu_data() only has one entry at index 0.
- */
- if (cpuid != real_hard_smp_processor_id())
- continue;
- cpuid = 0;
#endif
+ ret = func(dp, cpuid, arg);
+ if (ret)
+ return ret;
+ }
+ return NULL;
+}
- cpu_data(cpuid).clock_tick =
- of_getintprop_default(dp, "clock-frequency", 0);
-
- if (portid_parent) {
- cpu_data(cpuid).dcache_size =
- of_getintprop_default(dp, "l1-dcache-size",
- 16 * 1024);
- cpu_data(cpuid).dcache_line_size =
- of_getintprop_default(dp, "l1-dcache-line-size",
- 32);
- cpu_data(cpuid).icache_size =
- of_getintprop_default(dp, "l1-icache-size",
- 8 * 1024);
- cpu_data(cpuid).icache_line_size =
- of_getintprop_default(dp, "l1-icache-line-size",
- 32);
- cpu_data(cpuid).ecache_size =
- of_getintprop_default(dp, "l2-cache-size", 0);
- cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(dp, "l2-cache-line-size", 0);
- if (!cpu_data(cpuid).ecache_size ||
- !cpu_data(cpuid).ecache_line_size) {
- cpu_data(cpuid).ecache_size =
- of_getintprop_default(portid_parent,
- "l2-cache-size",
- (4 * 1024 * 1024));
- cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(portid_parent,
- "l2-cache-line-size", 64);
- }
-
- cpu_data(cpuid).core_id = portid + 1;
- cpu_data(cpuid).proc_id = portid;
+static void *check_cpu_node(struct device_node *dp, int cpuid, int id)
+{
+ if (id == cpuid)
+ return dp;
+ return NULL;
+}
+
+struct device_node *of_find_node_by_cpuid(int cpuid)
+{
+ return of_iterate_over_cpus(check_cpu_node, cpuid);
+}
+
+static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+ ncpus_probed++;
#ifdef CONFIG_SMP
- sparc64_multi_core = 1;
+ set_cpu_present(cpuid, true);
+ set_cpu_possible(cpuid, true);
#endif
- } else {
- cpu_data(cpuid).dcache_size =
- of_getintprop_default(dp, "dcache-size", 16 * 1024);
- cpu_data(cpuid).dcache_line_size =
- of_getintprop_default(dp, "dcache-line-size", 32);
+ return NULL;
+}
- cpu_data(cpuid).icache_size =
- of_getintprop_default(dp, "icache-size", 16 * 1024);
- cpu_data(cpuid).icache_line_size =
- of_getintprop_default(dp, "icache-line-size", 32);
+void __init of_populate_present_mask(void)
+{
+ if (tlb_type == hypervisor)
+ return;
+
+ ncpus_probed = 0;
+ of_iterate_over_cpus(record_one_cpu, 0);
+}
+static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+ struct device_node *portid_parent = NULL;
+ int portid = -1;
+
+ if (of_find_property(dp, "cpuid", NULL)) {
+ int limit = 2;
+
+ portid_parent = dp;
+ while (limit--) {
+ portid_parent = portid_parent->parent;
+ if (!portid_parent)
+ break;
+ portid = of_getintprop_default(portid_parent,
+ "portid", -1);
+ if (portid >= 0)
+ break;
+ }
+ }
+
+#ifndef CONFIG_SMP
+ /* On uniprocessor we only want the values for the
+ * real physical cpu the kernel booted onto, however
+ * cpu_data() only has one entry at index 0.
+ */
+ if (cpuid != real_hard_smp_processor_id())
+ return NULL;
+ cpuid = 0;
+#endif
+
+ cpu_data(cpuid).clock_tick =
+ of_getintprop_default(dp, "clock-frequency", 0);
+
+ if (portid_parent) {
+ cpu_data(cpuid).dcache_size =
+ of_getintprop_default(dp, "l1-dcache-size",
+ 16 * 1024);
+ cpu_data(cpuid).dcache_line_size =
+ of_getintprop_default(dp, "l1-dcache-line-size",
+ 32);
+ cpu_data(cpuid).icache_size =
+ of_getintprop_default(dp, "l1-icache-size",
+ 8 * 1024);
+ cpu_data(cpuid).icache_line_size =
+ of_getintprop_default(dp, "l1-icache-line-size",
+ 32);
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(dp, "l2-cache-size", 0);
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(dp, "l2-cache-line-size", 0);
+ if (!cpu_data(cpuid).ecache_size ||
+ !cpu_data(cpuid).ecache_line_size) {
cpu_data(cpuid).ecache_size =
- of_getintprop_default(dp, "ecache-size",
+ of_getintprop_default(portid_parent,
+ "l2-cache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(dp, "ecache-line-size", 64);
-
- cpu_data(cpuid).core_id = 0;
- cpu_data(cpuid).proc_id = -1;
+ of_getintprop_default(portid_parent,
+ "l2-cache-line-size", 64);
}
+ cpu_data(cpuid).core_id = portid + 1;
+ cpu_data(cpuid).proc_id = portid;
#ifdef CONFIG_SMP
- set_cpu_present(cpuid, true);
- set_cpu_possible(cpuid, true);
+ sparc64_multi_core = 1;
#endif
+ } else {
+ cpu_data(cpuid).dcache_size =
+ of_getintprop_default(dp, "dcache-size", 16 * 1024);
+ cpu_data(cpuid).dcache_line_size =
+ of_getintprop_default(dp, "dcache-line-size", 32);
+
+ cpu_data(cpuid).icache_size =
+ of_getintprop_default(dp, "icache-size", 16 * 1024);
+ cpu_data(cpuid).icache_line_size =
+ of_getintprop_default(dp, "icache-line-size", 32);
+
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(dp, "ecache-size",
+ (4 * 1024 * 1024));
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(dp, "ecache-line-size", 64);
+
+ cpu_data(cpuid).core_id = 0;
+ cpu_data(cpuid).proc_id = -1;
}
+ return NULL;
+}
+
+void __init of_fill_in_cpu_data(void)
+{
+ if (tlb_type == hypervisor)
+ return;
+
+ of_iterate_over_cpus(fill_in_one_cpu, 0);
+
smp_fill_in_sib_core_maps();
}
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index ff7b591c894..0fb5789d43c 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -313,6 +313,4 @@ void __init prom_build_devicetree(void)
printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
-
- of_fill_in_cpu_data();
}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f7642e5a94d..fa44eaf8d89 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -20,7 +20,8 @@
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
-#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
#include <linux/cpu.h>
#include <asm/head.h>
@@ -47,6 +48,8 @@
#include <asm/ldc.h>
#include <asm/hypervisor.h>
+#include "cpumap.h"
+
int sparc64_multi_core __read_mostly;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
@@ -278,7 +281,7 @@ static unsigned long kimage_addr_to_ra(void *p)
return kern_base + (val - KERNBASE);
}
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
{
extern unsigned long sparc64_ttable_tl0;
extern unsigned long kern_locked_tte_data;
@@ -298,12 +301,12 @@ static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread
"hvtramp_descr.\n");
return;
}
+ *descrp = hdesc;
hdesc->cpu = cpu;
hdesc->num_mappings = num_kernel_image_mappings;
tb = &trap_block[cpu];
- tb->hdesc = hdesc;
hdesc->fault_info_va = (unsigned long) &tb->fault_info;
hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
@@ -341,12 +344,12 @@ static struct thread_info *cpu_new_thread = NULL;
static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
{
- struct trap_per_cpu *tb = &trap_block[cpu];
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
unsigned long cookie =
(unsigned long)(&cpu_new_thread);
struct task_struct *p;
+ void *descr = NULL;
int timeout, ret;
p = fork_idle(cpu);
@@ -359,7 +362,8 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
if (ldom_domaining_enabled)
ldom_startcpu_cpuid(cpu,
- (unsigned long) cpu_new_thread);
+ (unsigned long) cpu_new_thread,
+ &descr);
else
#endif
prom_startcpu_cpuid(cpu, entry, cookie);
@@ -383,10 +387,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
}
cpu_new_thread = NULL;
- if (tb->hdesc) {
- kfree(tb->hdesc);
- tb->hdesc = NULL;
- }
+ kfree(descr);
return ret;
}
@@ -1315,6 +1316,8 @@ int __cpu_disable(void)
cpu_clear(cpu, cpu_online_map);
ipi_call_unlock();
+ cpu_map_rebuild();
+
return 0;
}
@@ -1373,36 +1376,171 @@ void smp_send_stop(void)
{
}
-unsigned long __per_cpu_base __read_mostly;
-unsigned long __per_cpu_shift __read_mostly;
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size of allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+ unsigned long align)
+{
+ const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ int node = cpu_to_node(cpu);
+ void *ptr;
+
+ if (!node_online(node) || !NODE_DATA(node)) {
+ ptr = __alloc_bootmem(size, align, goal);
+ pr_info("cpu %d has no node %d or node-local memory\n",
+ cpu, node);
+ pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+ cpu, size, __pa(ptr));
+ } else {
+ ptr = __alloc_bootmem_node(NODE_DATA(node),
+ size, align, goal);
+ pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+ "%016lx\n", cpu, size, node, __pa(ptr));
+ }
+ return ptr;
+#else
+ return __alloc_bootmem(size, align, goal);
+#endif
+}
-EXPORT_SYMBOL(__per_cpu_base);
-EXPORT_SYMBOL(__per_cpu_shift);
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
-void __init real_setup_per_cpu_areas(void)
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
- unsigned long paddr, goal, size, i;
- char *ptr;
+ size_t off = (size_t)pageno << PAGE_SHIFT;
- /* Copy section for each CPU (we discard the original) */
- goal = PERCPU_ENOUGH_ROOM;
+ if (off >= pcpur_size)
+ return NULL;
- __per_cpu_shift = PAGE_SHIFT;
- for (size = PAGE_SIZE; size < goal; size <<= 1UL)
- __per_cpu_shift++;
+ return virt_to_page(pcpur_ptrs[cpu] + off);
+}
+
+#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
+
+static void __init pcpu_map_range(unsigned long start, unsigned long end,
+ struct page *page)
+{
+ unsigned long pfn = page_to_pfn(page);
+ unsigned long pte_base;
+
+ BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
+
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+ _PAGE_CP_4U | _PAGE_CV_4U |
+ _PAGE_P_4U | _PAGE_W_4U);
+ if (tlb_type == hypervisor)
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+ _PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+
+ while (start < end) {
+ pgd_t *pgd = pgd_offset_k(start);
+ unsigned long this_end;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pud = pud_offset(pgd, start);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ pud_populate(&init_mm, pud, new);
+ }
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd_present(*pmd)) {
+ pte_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, new);
+ }
- paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
- if (!paddr) {
- prom_printf("Cannot allocate per-cpu memory.\n");
- prom_halt();
+ pte = pte_offset_kernel(pmd, start);
+ this_end = (start + PMD_SIZE) & PMD_MASK;
+ if (this_end > end)
+ this_end = end;
+
+ while (start < this_end) {
+ unsigned long paddr = pfn << PAGE_SHIFT;
+
+ pte_val(*pte) = (paddr | pte_base);
+
+ start += PAGE_SIZE;
+ pte++;
+ pfn++;
+ }
+ }
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
+ static struct vm_struct vm;
+ unsigned long delta, cpu;
+ size_t pcpu_unit_size;
+ size_t ptrs_size;
+
+ pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+ PERCPU_DYNAMIC_RESERVE);
+ dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
+
+
+ ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+ pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+ for_each_possible_cpu(cpu) {
+ pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
+ PCPU_CHUNK_SIZE);
+
+ free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+ PCPU_CHUNK_SIZE - pcpur_size);
+
+ memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
}
- ptr = __va(paddr);
- __per_cpu_base = ptr - __per_cpu_start;
+ /* allocate address and map */
+ vm.flags = VM_ALLOC;
+ vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE;
+ vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
+
+ for_each_possible_cpu(cpu) {
+ unsigned long start = (unsigned long) vm.addr;
+ unsigned long end;
+
+ start += cpu * PCPU_CHUNK_SIZE;
+ end = start + PCPU_CHUNK_SIZE;
+ pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+ }
- for (i = 0; i < NR_CPUS; i++, ptr += size)
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+ PERCPU_MODULE_RESERVE, dyn_size,
+ PCPU_CHUNK_SIZE, vm.addr, NULL);
+
+ free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
+ __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+ }
/* Setup %g5 for the boot cpu. */
__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
+
+ of_fill_in_cpu_data();
+ if (tlb_type == hypervisor)
+ mdesc_fill_in_cpu_data(cpu_all_mask);
}
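
The new setup_per_cpu_areas() replaces the old shift-based scheme: each possible CPU gets a PCPU_CHUNK_SIZE (4 MB) backing allocation, the chunks are remapped back-to-back into an early vmalloc area with 4 MB PTEs, and pcpu_setup_first_chunk() returns the unit size from which the offsets are derived, so __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size exactly as computed above. A sketch of the resulting address arithmetic, using only symbols that appear in the code above:

/* Sketch only: a per-cpu variable's address is its link-time address
 * displaced by the offset computed in setup_per_cpu_areas().
 */
static void *example_per_cpu_ptr(void *var, unsigned int cpu)
{
	return (char *)var + __per_cpu_offset(cpu);
}
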
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 00ec3b15f38..69090165729 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -81,4 +81,6 @@ sys_call_table:
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo
+
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 82b5bf85b9d..6b3ee88e253 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -82,7 +82,8 @@ sys_call_table32:
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv, compat_sys_pwritev
+/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
+ .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
#endif /* CONFIG_COMPAT */
@@ -156,4 +157,5 @@ sys_call_table:
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+ .word sys_pwritev, sys_rt_tgsigqueueinfo
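
Counting from slot 320 in either table, sys_rt_tgsigqueueinfo lands at system call number 326 on both sparc32 and sparc64. Until the C library grows a wrapper it can be exercised through syscall(2); a hedged userspace sketch, assuming the generic syscall() stub:

/* Illustrative userspace call, assuming no libc wrapper yet.
 * 326 follows directly from the table layout above.
 */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_rt_tgsigqueueinfo(pid_t tgid, pid_t tid, int sig,
				     siginfo_t *info)
{
	return syscall(326, tgid, tid, sig, info);
}
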
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d809c4ebb48..10f7bb9fc14 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2509,6 +2509,7 @@ void do_getpsr(struct pt_regs *regs)
}
struct trap_per_cpu trap_block[NR_CPUS];
+EXPORT_SYMBOL(trap_block);
/* This can get invoked before sched_init() so play it super safe
* and use hard_smp_processor_id().
@@ -2530,84 +2531,97 @@ extern void tsb_config_offsets_are_bolixed_dave(void);
void __init trap_init(void)
{
/* Compile time sanity check. */
- if (TI_TASK != offsetof(struct thread_info, task) ||
- TI_FLAGS != offsetof(struct thread_info, flags) ||
- TI_CPU != offsetof(struct thread_info, cpu) ||
- TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
- TI_KSP != offsetof(struct thread_info, ksp) ||
- TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
- TI_KREGS != offsetof(struct thread_info, kregs) ||
- TI_UTRAPS != offsetof(struct thread_info, utraps) ||
- TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
- TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
- TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
- TI_GSR != offsetof(struct thread_info, gsr) ||
- TI_XFSR != offsetof(struct thread_info, xfsr) ||
- TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
- TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
- TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
- TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
- TI_PCR != offsetof(struct thread_info, pcr_reg) ||
- TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
- TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
- TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
- TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
- TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
- TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
- TI_FPREGS != offsetof(struct thread_info, fpregs) ||
- (TI_FPREGS & (64 - 1)))
- thread_info_offsets_are_bolixed_dave();
-
- if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
- (TRAP_PER_CPU_PGD_PADDR !=
- offsetof(struct trap_per_cpu, pgd_paddr)) ||
- (TRAP_PER_CPU_CPU_MONDO_PA !=
- offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
- (TRAP_PER_CPU_DEV_MONDO_PA !=
- offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
- (TRAP_PER_CPU_RESUM_MONDO_PA !=
- offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
- (TRAP_PER_CPU_RESUM_KBUF_PA !=
- offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
- (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
- offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
- (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
- offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
- (TRAP_PER_CPU_FAULT_INFO !=
- offsetof(struct trap_per_cpu, fault_info)) ||
- (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
- offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
- (TRAP_PER_CPU_CPU_LIST_PA !=
- offsetof(struct trap_per_cpu, cpu_list_pa)) ||
- (TRAP_PER_CPU_TSB_HUGE !=
- offsetof(struct trap_per_cpu, tsb_huge)) ||
- (TRAP_PER_CPU_TSB_HUGE_TEMP !=
- offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
- (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
- offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
- (TRAP_PER_CPU_CPU_MONDO_QMASK !=
- offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
- (TRAP_PER_CPU_DEV_MONDO_QMASK !=
- offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
- (TRAP_PER_CPU_RESUM_QMASK !=
- offsetof(struct trap_per_cpu, resum_qmask)) ||
- (TRAP_PER_CPU_NONRESUM_QMASK !=
- offsetof(struct trap_per_cpu, nonresum_qmask)))
- trap_per_cpu_offsets_are_bolixed_dave();
-
- if ((TSB_CONFIG_TSB !=
- offsetof(struct tsb_config, tsb)) ||
- (TSB_CONFIG_RSS_LIMIT !=
- offsetof(struct tsb_config, tsb_rss_limit)) ||
- (TSB_CONFIG_NENTRIES !=
- offsetof(struct tsb_config, tsb_nentries)) ||
- (TSB_CONFIG_REG_VAL !=
- offsetof(struct tsb_config, tsb_reg_val)) ||
- (TSB_CONFIG_MAP_VADDR !=
- offsetof(struct tsb_config, tsb_map_vaddr)) ||
- (TSB_CONFIG_MAP_PTE !=
- offsetof(struct tsb_config, tsb_map_pte)))
- tsb_config_offsets_are_bolixed_dave();
+ BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
+ TI_FLAGS != offsetof(struct thread_info, flags) ||
+ TI_CPU != offsetof(struct thread_info, cpu) ||
+ TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
+ TI_KSP != offsetof(struct thread_info, ksp) ||
+ TI_FAULT_ADDR != offsetof(struct thread_info,
+ fault_address) ||
+ TI_KREGS != offsetof(struct thread_info, kregs) ||
+ TI_UTRAPS != offsetof(struct thread_info, utraps) ||
+ TI_EXEC_DOMAIN != offsetof(struct thread_info,
+ exec_domain) ||
+ TI_REG_WINDOW != offsetof(struct thread_info,
+ reg_window) ||
+ TI_RWIN_SPTRS != offsetof(struct thread_info,
+ rwbuf_stkptrs) ||
+ TI_GSR != offsetof(struct thread_info, gsr) ||
+ TI_XFSR != offsetof(struct thread_info, xfsr) ||
+ TI_USER_CNTD0 != offsetof(struct thread_info,
+ user_cntd0) ||
+ TI_USER_CNTD1 != offsetof(struct thread_info,
+ user_cntd1) ||
+ TI_KERN_CNTD0 != offsetof(struct thread_info,
+ kernel_cntd0) ||
+ TI_KERN_CNTD1 != offsetof(struct thread_info,
+ kernel_cntd1) ||
+ TI_PCR != offsetof(struct thread_info, pcr_reg) ||
+ TI_PRE_COUNT != offsetof(struct thread_info,
+ preempt_count) ||
+ TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
+ TI_SYS_NOERROR != offsetof(struct thread_info,
+ syscall_noerror) ||
+ TI_RESTART_BLOCK != offsetof(struct thread_info,
+ restart_block) ||
+ TI_KUNA_REGS != offsetof(struct thread_info,
+ kern_una_regs) ||
+ TI_KUNA_INSN != offsetof(struct thread_info,
+ kern_una_insn) ||
+ TI_FPREGS != offsetof(struct thread_info, fpregs) ||
+ (TI_FPREGS & (64 - 1)));
+
+ BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
+ thread) ||
+ (TRAP_PER_CPU_PGD_PADDR !=
+ offsetof(struct trap_per_cpu, pgd_paddr)) ||
+ (TRAP_PER_CPU_CPU_MONDO_PA !=
+ offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
+ (TRAP_PER_CPU_DEV_MONDO_PA !=
+ offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
+ (TRAP_PER_CPU_RESUM_MONDO_PA !=
+ offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+ (TRAP_PER_CPU_RESUM_KBUF_PA !=
+ offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
+ (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
+ offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+ (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+ offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
+ (TRAP_PER_CPU_FAULT_INFO !=
+ offsetof(struct trap_per_cpu, fault_info)) ||
+ (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+ offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+ (TRAP_PER_CPU_CPU_LIST_PA !=
+ offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+ (TRAP_PER_CPU_TSB_HUGE !=
+ offsetof(struct trap_per_cpu, tsb_huge)) ||
+ (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+ offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+ (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+ offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
+ (TRAP_PER_CPU_CPU_MONDO_QMASK !=
+ offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
+ (TRAP_PER_CPU_DEV_MONDO_QMASK !=
+ offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
+ (TRAP_PER_CPU_RESUM_QMASK !=
+ offsetof(struct trap_per_cpu, resum_qmask)) ||
+ (TRAP_PER_CPU_NONRESUM_QMASK !=
+ offsetof(struct trap_per_cpu, nonresum_qmask)) ||
+ (TRAP_PER_CPU_PER_CPU_BASE !=
+ offsetof(struct trap_per_cpu, __per_cpu_base)));
+
+ BUILD_BUG_ON((TSB_CONFIG_TSB !=
+ offsetof(struct tsb_config, tsb)) ||
+ (TSB_CONFIG_RSS_LIMIT !=
+ offsetof(struct tsb_config, tsb_rss_limit)) ||
+ (TSB_CONFIG_NENTRIES !=
+ offsetof(struct tsb_config, tsb_nentries)) ||
+ (TSB_CONFIG_REG_VAL !=
+ offsetof(struct tsb_config, tsb_reg_val)) ||
+ (TSB_CONFIG_MAP_VADDR !=
+ offsetof(struct tsb_config, tsb_map_vaddr)) ||
+ (TSB_CONFIG_MAP_PTE !=
+ offsetof(struct tsb_config, tsb_map_pte)));
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
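
The hunk above converts the old offsetof() consistency checks (the thread_info_offsets_are_bolixed_dave() style calls) into BUILD_BUG_ON() assertions, so a mismatch between an asm-offsets constant and its C structure is now caught as a hard build failure. A minimal sketch of the mechanism, using a made-up struct and constant rather than anything from this patch:

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */

struct example {
	int	a;
	long	b;	/* offset 8 on 64-bit because of alignment */
};

#define EXAMPLE_B_OFFSET	4	/* deliberately stale on 64-bit */

static inline void example_offset_check(void)
{
	/* 4 != 8, so this line refuses to compile until the constant is fixed. */
	BUILD_BUG_ON(EXAMPLE_B_OFFSET != offsetof(struct example, b));
}
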
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index cbb282dab5a..26bb3919ff1 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -358,6 +358,7 @@ void __init paging_init(void)
protection_map[15] = PAGE_SHARED;
btfixup();
prom_build_devicetree();
+ of_fill_in_cpu_data();
device_scan();
}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f26a352c08a..ca92e2f54e4 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1679,11 +1679,6 @@ pgd_t swapper_pg_dir[2048];
static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
-/* Dummy function */
-void __init setup_per_cpu_areas(void)
-{
-}
-
void __init paging_init(void)
{
unsigned long end_pfn, shift, phys_base;
@@ -1799,16 +1794,13 @@ void __init paging_init(void)
if (tlb_type == hypervisor)
sun4v_ktsb_register();
- /* We must setup the per-cpu areas before we pull in the
- * PROM and the MDESC. The code there fills in cpu and
- * other information into per-cpu data structures.
- */
- real_setup_per_cpu_areas();
-
prom_build_devicetree();
+ of_populate_present_mask();
- if (tlb_type == hypervisor)
+ if (tlb_type == hypervisor) {
sun4v_mdesc_init();
+ mdesc_populate_present_mask(cpu_all_mask);
+ }
/* Once the OF device tree and MDESC have been setup, we know
* the list of possible cpus. Therefore we can allocate the
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 06c9a7d9820..ade4eb373bd 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
+#include <linux/log2.h>
#include <asm/bitext.h>
#include <asm/page.h>
@@ -349,7 +350,7 @@ static void srmmu_free_nocache(unsigned long vaddr, int size)
vaddr, srmmu_nocache_end);
BUG();
}
- if (size & (size-1)) {
+ if (!is_power_of_2(size)) {
printk("Size 0x%x is not a power of 2\n", size);
BUG();
}
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 434ba121e3c..3b44b47c7e1 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -360,7 +360,7 @@ static struct platform_driver uml_net_driver = {
static void net_device_release(struct device *dev)
{
- struct uml_net *device = dev->driver_data;
+ struct uml_net *device = dev_get_drvdata(dev);
struct net_device *netdev = device->dev;
struct uml_net_private *lp = netdev_priv(netdev);
@@ -440,7 +440,7 @@ static void eth_configure(int n, void *init, char *mac,
device->pdev.id = n;
device->pdev.name = DRIVER_NAME;
device->pdev.dev.release = net_device_release;
- device->pdev.dev.driver_data = device;
+ dev_set_drvdata(&device->pdev.dev, device);
if (platform_device_register(&device->pdev))
goto out_free_netdev;
SET_NETDEV_DEV(dev,&device->pdev.dev);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index aa9e926e13d..8f05d4d9da1 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -778,7 +778,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
static void ubd_device_release(struct device *dev)
{
- struct ubd *ubd_dev = dev->driver_data;
+ struct ubd *ubd_dev = dev_get_drvdata(dev);
blk_cleanup_queue(ubd_dev->queue);
*ubd_dev = ((struct ubd) DEFAULT_UBD);
@@ -807,7 +807,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
ubd_devs[unit].pdev.id = unit;
ubd_devs[unit].pdev.name = DRIVER_NAME;
ubd_devs[unit].pdev.dev.release = ubd_device_release;
- ubd_devs[unit].pdev.dev.driver_data = &ubd_devs[unit];
+ dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
platform_device_register(&ubd_devs[unit].pdev);
disk->driverfs_dev = &ubd_devs[unit].pdev.dev;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 356d2ec8e2f..cf42fc30541 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -46,6 +46,7 @@ config X86
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
+ select HAVE_ARCH_KMEMCHECK
config OUTPUT_FORMAT
string
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index edbd0ca6206..1b68659c41b 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -81,6 +81,11 @@ ifdef CONFIG_CC_STACKPROTECTOR
endif
endif
+# Don't unroll struct assignments with kmemcheck enabled
+ifeq ($(CONFIG_KMEMCHECK),y)
+ KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
+endif
+
# Stackpointer is addressed different for 32 bit and 64 bit x86
sp-$(CONFIG_X86_32) := esp
sp-$(CONFIG_X86_64) := rsp
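
The idea behind the new cc-option: kmemcheck single-steps every access to tracked memory, so a copy the compiler expands into a long run of inline moves is far more expensive to trace than one funneled through the ordinary memcpy()/rep-movs path, where one instruction carries source, destination and length. A rough userspace illustration of the kind of copy affected (the struct and function are invented, and what the compiler actually emits depends on its version and flags):

#include <string.h>

struct packet_header {
	unsigned int id;
	unsigned int len;
	unsigned int flags;
	unsigned int csum;
};

void copy_header(struct packet_header *dst, const struct packet_header *src)
{
	/* With the builtin enabled this small fixed-size copy is typically
	 * expanded inline; with -fno-builtin-memcpy it stays a real call. */
	memcpy(dst, src, sizeof(*dst));
}
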
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index f82fdc412c6..b93405b228b 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -6,6 +6,7 @@
* Documentation/DMA-API.txt for documentation.
*/
+#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
@@ -60,6 +61,7 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
+ kmemcheck_mark_initialized(ptr, size);
addr = ops->map_page(hwdev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, NULL);
@@ -87,8 +89,12 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg,
{
struct dma_map_ops *ops = get_dma_ops(hwdev);
int ents;
+ struct scatterlist *s;
+ int i;
BUG_ON(!valid_dma_direction(dir));
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
debug_dma_map_sg(hwdev, sg, nents, ents, dir);
@@ -200,6 +206,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
addr = ops->map_page(dev, page, offset, size, dir, NULL);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
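
The three kmemcheck_mark_initialized() calls added above cover buffers handed to a device: data the device writes by DMA produces no CPU store for kmemcheck to observe, so without the marking a later CPU read of device-written data would be reported as a read of uninitialized memory. A hypothetical driver-side sequence (function name, buffer size and direction are invented; error handling is omitted):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *example_rx_buffer(struct device *dev, dma_addr_t *handle)
{
	void *buf = kmalloc(4096, GFP_KERNEL);	/* shadow: uninitialized */

	if (!buf)
		return NULL;

	/*
	 * dma_map_single() now marks the whole range initialized before the
	 * hardware can write it, so reading the DMA'd data later does not
	 * trip a false "uninitialized memory" report.
	 */
	*handle = dma_map_single(dev, buf, 4096, DMA_FROM_DEVICE);
	return buf;
}
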
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
new file mode 100644
index 00000000000..ed01518f297
--- /dev/null
+++ b/arch/x86/include/asm/kmemcheck.h
@@ -0,0 +1,42 @@
+#ifndef ASM_X86_KMEMCHECK_H
+#define ASM_X86_KMEMCHECK_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_KMEMCHECK
+bool kmemcheck_active(struct pt_regs *regs);
+
+void kmemcheck_show(struct pt_regs *regs);
+void kmemcheck_hide(struct pt_regs *regs);
+
+bool kmemcheck_fault(struct pt_regs *regs,
+ unsigned long address, unsigned long error_code);
+bool kmemcheck_trap(struct pt_regs *regs);
+#else
+static inline bool kmemcheck_active(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void kmemcheck_show(struct pt_regs *regs)
+{
+}
+
+static inline void kmemcheck_hide(struct pt_regs *regs)
+{
+}
+
+static inline bool kmemcheck_fault(struct pt_regs *regs,
+ unsigned long address, unsigned long error_code)
+{
+ return false;
+}
+
+static inline bool kmemcheck_trap(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_KMEMCHECK */
+
+#endif
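
The header keeps the whole API available in both configurations: with CONFIG_KMEMCHECK disabled the stubs return false or do nothing, so the call sites added later in this series (the page-fault and debug-trap handlers) need no #ifdef of their own. A condensed sketch of that calling pattern, modelled on the fault-handler hunk further down (the wrapper function itself is invented):

#include <linux/types.h>
#include <asm/kmemcheck.h>

/* Returns true if kmemcheck claimed the fault. */
static bool example_kernel_fault(struct pt_regs *regs, unsigned long address,
				 unsigned long error_code)
{
	/* Re-hide any page kmemcheck exposed for single-stepping. */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);

	/* The !CONFIG_KMEMCHECK stub simply returns false, so no #ifdef
	 * is needed here. */
	if (kmemcheck_fault(regs, address, error_code))
		return true;

	return false;	/* fall through to normal fault handling */
}
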
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 18ef7ebf263..3cc06e3fceb 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -317,6 +317,11 @@ static inline int pte_present(pte_t a)
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
+static inline int pte_hidden(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_HIDDEN;
+}
+
static inline int pmd_present(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_PRESENT;
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 4d258ad76a0..54cb697f490 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -18,7 +18,7 @@
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
@@ -41,13 +41,18 @@
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL
+#ifdef CONFIG_KMEMCHECK
+#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#else
+#define _PAGE_HIDDEN (_AT(pteval_t, 0))
+#endif
+
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 0e0e3ba827f..c86f452256d 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -177,10 +177,18 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
* No 3D Now!
*/
+#ifndef CONFIG_KMEMCHECK
#define memcpy(t, f, n) \
(__builtin_constant_p((n)) \
? __constant_memcpy((t), (f), (n)) \
: __memcpy((t), (f), (n)))
+#else
+/*
+ * kmemcheck copes much better when the REP instructions are used
+ * unconditionally, because then both memory operands are known in advance.
+ */
+#define memcpy(t, f, n) __memcpy((t), (f), (n))
+#endif
#endif

diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 2afe164bf1e..19e2c468fc2 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -27,6 +27,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
function. */
#define __HAVE_ARCH_MEMCPY 1
+#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
extern void *memcpy(void *to, const void *from, size_t len);
#else
@@ -42,6 +43,13 @@ extern void *__memcpy(void *to, const void *from, size_t len);
__ret; \
})
#endif
+#else
+/*
+ * kmemcheck copes much better when the REP instructions are used
+ * unconditionally, because then both memory operands are known in advance.
+ */
+#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
+#endif
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 602c769fc98..b0783520988 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -154,9 +154,9 @@ struct thread_info {
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
-#define THREAD_FLAGS GFP_KERNEL
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
#endif
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index 11b3bb86e17..7fcf6f3dbcc 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,5 +1,10 @@
+#ifdef CONFIG_KMEMCHECK
+/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
+# include <asm-generic/xor.h>
+#else
#ifdef CONFIG_X86_32
# include "xor_32.h"
#else
# include "xor_64.h"
#endif
+#endif
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index daed39ba261..3260ab04499 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -86,6 +86,29 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model < 15)
clear_cpu_cap(c, X86_FEATURE_PAT);
+
+#ifdef CONFIG_KMEMCHECK
+ /*
+ * P4s have a "fast strings" feature which causes single-
+ * stepping REP instructions to only generate a #DB on
+ * cache-line boundaries.
+ *
+ * Ingo Molnar reported a Pentium D (model 6) and a Xeon
+ * (model 2) with the same problem.
+ */
+ if (c->x86 == 15) {
+ u64 misc_enable;
+
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+ if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
+ printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
+
+ misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
+ wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+ }
+ }
+#endif
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 2ac1f0c2beb..b07af886124 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -182,6 +182,11 @@ static struct notifier_block __refdata cpuid_class_cpu_notifier =
.notifier_call = cpuid_class_cpu_callback,
};
+static char *cpuid_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
+}
+
static int __init cpuid_init(void)
{
int i, err = 0;
@@ -198,6 +203,7 @@ static int __init cpuid_init(void)
err = PTR_ERR(cpuid_class);
goto out_chrdev;
}
+ cpuid_class->nodename = cpuid_nodename;
for_each_online_cpu(i) {
err = cpuid_device_create(i);
if (err != 0)
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 9c4461501fc..9371448290a 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -236,6 +236,7 @@ static const struct file_operations microcode_fops = {
static struct miscdevice microcode_dev = {
.minor = MICROCODE_MINOR,
.name = "microcode",
+ .nodename = "cpu/microcode",
.fops = &microcode_fops,
};
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 3cf3413ec62..98fd6cd4e3a 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -196,6 +196,11 @@ static struct notifier_block __refdata msr_class_cpu_notifier = {
.notifier_call = msr_class_cpu_callback,
};
+static char *msr_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
+}
+
static int __init msr_init(void)
{
int i, err = 0;
@@ -212,6 +217,7 @@ static int __init msr_init(void)
err = PTR_ERR(msr_class);
goto out_chrdev;
}
+ msr_class->nodename = msr_nodename;
for_each_online_cpu(i) {
err = msr_device_create(i);
if (err != 0)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3bb2be1649b..994dd6a4a2a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -63,7 +63,7 @@ void arch_task_cache_init(void)
task_xstate_cachep =
kmem_cache_create("task_xstate", xstate_size,
__alignof__(union thread_xstate),
- SLAB_PANIC, NULL);
+ SLAB_PANIC | SLAB_NOTRACK, NULL);
}
/*
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 4aaf7e48394..c3eb207181f 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -77,6 +77,13 @@ void save_stack_trace(struct stack_trace *trace)
}
EXPORT_SYMBOL_GPL(save_stack_trace);
+void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
+{
+ dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
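
save_stack_trace_bp() behaves like save_stack_trace() except that the unwind starts from a caller-supplied frame pointer instead of the current one, which lets a tracer attribute an event to a previously recorded frame. A hypothetical caller (function name and buffer size are invented):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void report_from_bp(unsigned long bp)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
	};

	save_stack_trace_bp(&trace, bp);	/* unwind starting at 'bp' */
	print_stack_trace(&trace, 0);		/* 0 = no extra indentation */
}
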
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1e1e27b7d43..5f935f0d586 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -45,6 +45,7 @@
#include <linux/edac.h>
#endif
+#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
@@ -534,6 +535,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
get_debugreg(condition, 6);
+ /* Catch kmemcheck conditions first of all! */
+ if (condition & DR_STEP && kmemcheck_trap(regs))
+ return;
+
/*
* The processor cleared BTF, so don't mark that we need it set.
*/
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fdd30d08ab5..eefdeee8a87 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_HIGHMEM) += highmem_32.o
+obj-$(CONFIG_KMEMCHECK) += kmemcheck/
+
obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c6acc632637..baa0e86adfb 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -14,6 +14,7 @@
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
+#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
/*
* Page fault error code bits:
@@ -956,6 +957,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
/* Get the faulting address: */
address = read_cr2();
+ /*
+ * Detect and handle instructions that would cause a page fault for
+ * both a tracked kernel page and a userspace page.
+ */
+ if (kmemcheck_active(regs))
+ kmemcheck_hide(regs);
+
if (unlikely(kmmio_fault(regs, address)))
return;
@@ -973,9 +981,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
- if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
- vmalloc_fault(address) >= 0)
- return;
+ if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+
+ if (kmemcheck_fault(regs, address, error_code))
+ return;
+ }
/* Can handle a stale RO->RW TLB: */
if (spurious_fault(error_code, address))
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 34c1bfb64f1..f53b57e4086 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -213,7 +213,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
if (!after_bootmem)
init_gbpages();
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
* This will simplify cpa(), which otherwise needs to support splitting
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9ff3c0816d1..3cd7711bb94 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -111,7 +111,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
pte_t *page_table = NULL;
if (after_bootmem) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
if (!page_table)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 52bb9519bb8..9c543290a81 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -104,7 +104,7 @@ static __ref void *spp_getpage(void)
void *ptr;
if (after_bootmem)
- ptr = (void *) get_zeroed_page(GFP_ATOMIC);
+ ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
else
ptr = alloc_bootmem_pages(PAGE_SIZE);
@@ -281,7 +281,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
void *adr;
if (after_bootmem) {
- adr = (void *)get_zeroed_page(GFP_ATOMIC);
+ adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
*phys = __pa(adr);
return adr;
diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile
new file mode 100644
index 00000000000..520b3bce409
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/Makefile
@@ -0,0 +1 @@
+obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
new file mode 100644
index 00000000000..4901d0dafda
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -0,0 +1,228 @@
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+
+#include "error.h"
+#include "shadow.h"
+
+enum kmemcheck_error_type {
+ KMEMCHECK_ERROR_INVALID_ACCESS,
+ KMEMCHECK_ERROR_BUG,
+};
+
+#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)
+
+struct kmemcheck_error {
+ enum kmemcheck_error_type type;
+
+ union {
+ /* KMEMCHECK_ERROR_INVALID_ACCESS */
+ struct {
+ /* Kind of access that caused the error */
+ enum kmemcheck_shadow state;
+ /* Address and size of the erroneous read */
+ unsigned long address;
+ unsigned int size;
+ };
+ };
+
+ struct pt_regs regs;
+ struct stack_trace trace;
+ unsigned long trace_entries[32];
+
+ /* Shadow state and memory contents at the error site, one byte per tracked byte. */
+ unsigned char shadow_copy[SHADOW_COPY_SIZE];
+ unsigned char memory_copy[SHADOW_COPY_SIZE];
+};
+
+/*
+ * Create a ring queue of errors to output. We can't call printk() directly
+ * from the kmemcheck traps, since this may call the console drivers and
+ * result in a recursive fault.
+ */
+static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
+static unsigned int error_count;
+static unsigned int error_rd;
+static unsigned int error_wr;
+static unsigned int error_missed_count;
+
+static struct kmemcheck_error *error_next_wr(void)
+{
+ struct kmemcheck_error *e;
+
+ if (error_count == ARRAY_SIZE(error_fifo)) {
+ ++error_missed_count;
+ return NULL;
+ }
+
+ e = &error_fifo[error_wr];
+ if (++error_wr == ARRAY_SIZE(error_fifo))
+ error_wr = 0;
+ ++error_count;
+ return e;
+}
+
+static struct kmemcheck_error *error_next_rd(void)
+{
+ struct kmemcheck_error *e;
+
+ if (error_count == 0)