Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/ChangeLog  775
-rw-r--r--  drivers/char/Kconfig  988
-rw-r--r--  drivers/char/Makefile  118
-rw-r--r--  drivers/char/agp/Kconfig  171
-rw-r--r--  drivers/char/agp/Makefile  18
-rw-r--r--  drivers/char/agp/agp.h  331
-rw-r--r--  drivers/char/agp/ali-agp.c  414
-rw-r--r--  drivers/char/agp/alpha-agp.c  216
-rw-r--r--  drivers/char/agp/amd-k7-agp.c  542
-rw-r--r--  drivers/char/agp/amd64-agp.c  761
-rw-r--r--  drivers/char/agp/ati-agp.c  548
-rw-r--r--  drivers/char/agp/backend.c  348
-rw-r--r--  drivers/char/agp/efficeon-agp.c  463
-rw-r--r--  drivers/char/agp/frontend.c  1103
-rw-r--r--  drivers/char/agp/generic.c  1222
-rw-r--r--  drivers/char/agp/hp-agp.c  552
-rw-r--r--  drivers/char/agp/i460-agp.c  642
-rw-r--r--  drivers/char/agp/intel-agp.c  1833
-rw-r--r--  drivers/char/agp/isoch.c  470
-rw-r--r--  drivers/char/agp/nvidia-agp.c  424
-rw-r--r--  drivers/char/agp/sgi-agp.c  331
-rw-r--r--  drivers/char/agp/sis-agp.c  360
-rw-r--r--  drivers/char/agp/sworks-agp.c  556
-rw-r--r--  drivers/char/agp/uninorth-agp.c  647
-rw-r--r--  drivers/char/agp/via-agp.c  548
-rw-r--r--  drivers/char/amiserial.c  2179
-rw-r--r--  drivers/char/applicom.c  862
-rw-r--r--  drivers/char/applicom.h  85
-rw-r--r--  drivers/char/cd1865.h  263
-rw-r--r--  drivers/char/consolemap.c  672
-rw-r--r--  drivers/char/cp437.uni  291
-rw-r--r--  drivers/char/cyclades.c  5540
-rw-r--r--  drivers/char/decserial.c  100
-rw-r--r--  drivers/char/defkeymap.c_shipped  262
-rw-r--r--  drivers/char/defkeymap.map  357
-rw-r--r--  drivers/char/digi.h  71
-rw-r--r--  drivers/char/digi1.h  100
-rw-r--r--  drivers/char/digiFep1.h  136
-rw-r--r--  drivers/char/digiPCI.h  42
-rw-r--r--  drivers/char/drm/Kconfig  98
-rw-r--r--  drivers/char/drm/Makefile  33
-rw-r--r--  drivers/char/drm/README.drm  46
-rw-r--r--  drivers/char/drm/ati_pcigart.c  208
-rw-r--r--  drivers/char/drm/drm.h  675
-rw-r--r--  drivers/char/drm/drmP.h  1073
-rw-r--r--  drivers/char/drm/drm_agpsupport.c  448
-rw-r--r--  drivers/char/drm/drm_auth.c  230
-rw-r--r--  drivers/char/drm/drm_bufs.c  1270
-rw-r--r--  drivers/char/drm/drm_context.c  578
-rw-r--r--  drivers/char/drm/drm_core.h  34
-rw-r--r--  drivers/char/drm/drm_dma.c  180
-rw-r--r--  drivers/char/drm/drm_drawable.c  56
-rw-r--r--  drivers/char/drm/drm_drv.c  531
-rw-r--r--  drivers/char/drm/drm_fops.c  451
-rw-r--r--  drivers/char/drm/drm_init.c  52
-rw-r--r--  drivers/char/drm/drm_ioctl.c  370
-rw-r--r--  drivers/char/drm/drm_irq.c  370
-rw-r--r--  drivers/char/drm/drm_lock.c  303
-rw-r--r--  drivers/char/drm/drm_memory.c  181
-rw-r--r--  drivers/char/drm/drm_memory.h  197
-rw-r--r--  drivers/char/drm/drm_memory_debug.h  459
-rw-r--r--  drivers/char/drm/drm_os_linux.h  149
-rw-r--r--  drivers/char/drm/drm_pci.c  140
-rw-r--r--  drivers/char/drm/drm_pciids.h  224
-rw-r--r--  drivers/char/drm/drm_proc.c  539
-rw-r--r--  drivers/char/drm/drm_sarea.h  78
-rw-r--r--  drivers/char/drm/drm_scatter.c  231
-rw-r--r--  drivers/char/drm/drm_stub.c  319
-rw-r--r--  drivers/char/drm/drm_sysfs.c  208
-rw-r--r--  drivers/char/drm/drm_vm.c  678
-rw-r--r--  drivers/char/drm/ffb_context.c  551
-rw-r--r--  drivers/char/drm/ffb_drv.c  365
-rw-r--r--  drivers/char/drm/ffb_drv.h  286
-rw-r--r--  drivers/char/drm/gamma_context.h  492
-rw-r--r--  drivers/char/drm/gamma_dma.c  946
-rw-r--r--  drivers/char/drm/gamma_drm.h  90
-rw-r--r--  drivers/char/drm/gamma_drv.c  59
-rw-r--r--  drivers/char/drm/gamma_drv.h  147
-rw-r--r--  drivers/char/drm/gamma_lists.h  215
-rw-r--r--  drivers/char/drm/gamma_lock.h  140
-rw-r--r--  drivers/char/drm/gamma_old_dma.h  313
-rw-r--r--  drivers/char/drm/i810_dma.c  1385
-rw-r--r--  drivers/char/drm/i810_drm.h  289
-rw-r--r--  drivers/char/drm/i810_drv.c  126
-rw-r--r--  drivers/char/drm/i810_drv.h  236
-rw-r--r--  drivers/char/drm/i830_dma.c  1588
-rw-r--r--  drivers/char/drm/i830_drm.h  350
-rw-r--r--  drivers/char/drm/i830_drv.c  137
-rw-r--r--  drivers/char/drm/i830_drv.h  301
-rw-r--r--  drivers/char/drm/i830_irq.c  204
-rw-r--r--  drivers/char/drm/i915_dma.c  725
-rw-r--r--  drivers/char/drm/i915_drm.h  167
-rw-r--r--  drivers/char/drm/i915_drv.c  104
-rw-r--r--  drivers/char/drm/i915_drv.h  243
-rw-r--r--  drivers/char/drm/i915_irq.c  161
-rw-r--r--  drivers/char/drm/i915_mem.c  346
-rw-r--r--  drivers/char/drm/mga_dma.c  754
-rw-r--r--  drivers/char/drm/mga_drm.h  349
-rw-r--r--  drivers/char/drm/mga_drv.c  127
-rw-r--r--  drivers/char/drm/mga_drv.h  638
-rw-r--r--  drivers/char/drm/mga_irq.c  102
-rw-r--r--  drivers/char/drm/mga_state.c  1123
-rw-r--r--  drivers/char/drm/mga_ucode.h  11645
-rw-r--r--  drivers/char/drm/mga_warp.c  210
-rw-r--r--  drivers/char/drm/r128_cce.c  943
-rw-r--r--  drivers/char/drm/r128_drm.h  345
-rw-r--r--  drivers/char/drm/r128_drv.c  122
-rw-r--r--  drivers/char/drm/r128_drv.h  521
-rw-r--r--  drivers/char/drm/r128_irq.c  102
-rw-r--r--  drivers/char/drm/r128_state.c  1732
-rw-r--r--  drivers/char/drm/radeon_cp.c  2061
-rw-r--r--  drivers/char/drm/radeon_drm.h  659
-rw-r--r--  drivers/char/drm/radeon_drv.c  127
-rw-r--r--  drivers/char/drm/radeon_drv.h  1044
-rw-r--r--  drivers/char/drm/radeon_irq.c  251
-rw-r--r--  drivers/char/drm/radeon_mem.c  322
-rw-r--r--  drivers/char/drm/radeon_state.c  3102
-rw-r--r--  drivers/char/drm/sis_drm.h  42
-rw-r--r--  drivers/char/drm/sis_drv.c  110
-rw-r--r--  drivers/char/drm/sis_drv.h  52
-rw-r--r--  drivers/char/drm/sis_ds.c  301
-rw-r--r--  drivers/char/drm/sis_ds.h  145
-rw-r--r--  drivers/char/drm/sis_mm.c  417
-rw-r--r--  drivers/char/drm/tdfx_drv.c  107
-rw-r--r--  drivers/char/drm/tdfx_drv.h  50
-rw-r--r--  drivers/char/ds1286.c  578
-rw-r--r--  drivers/char/ds1302.c  354
-rw-r--r--  drivers/char/ds1620.c  416
-rw-r--r--  drivers/char/dsp56k.c  547
-rw-r--r--  drivers/char/dtlk.c  659
-rw-r--r--  drivers/char/ec3104_keyb.c  459
-rw-r--r--  drivers/char/efirtc.c  417
-rw-r--r--  drivers/char/epca.c  3789
-rw-r--r--  drivers/char/epca.h  165
-rw-r--r--  drivers/char/epcaconfig.h  7
-rw-r--r--  drivers/char/esp.c  2630
-rw-r--r--  drivers/char/ftape/Kconfig  340
-rw-r--r--  drivers/char/ftape/Makefile  28
-rw-r--r--  drivers/char/ftape/README.PCI  81
-rw-r--r--  drivers/char/ftape/RELEASE-NOTES  966
-rw-r--r--  drivers/char/ftape/compressor/Makefile  31
-rw-r--r--  drivers/char/ftape/compressor/lzrw3.c  743
-rw-r--r--  drivers/char/ftape/compressor/lzrw3.h  253
-rw-r--r--  drivers/char/ftape/compressor/zftape-compress.c  1203
-rw-r--r--  drivers/char/ftape/compressor/zftape-compress.h  83
-rw-r--r--  drivers/char/ftape/lowlevel/Makefile  43
-rw-r--r--  drivers/char/ftape/lowlevel/fc-10.c  175
-rw-r--r--  drivers/char/ftape/lowlevel/fc-10.h  39
-rw-r--r--  drivers/char/ftape/lowlevel/fdc-io.c  1352
-rw-r--r--  drivers/char/ftape/lowlevel/fdc-io.h  252
-rw-r--r--  drivers/char/ftape/lowlevel/fdc-isr.c  1170
-rw-r--r--  drivers/char/ftape/lowlevel/fdc-isr.h  55
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-bsm.c  491
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-bsm.h  66
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-buffer.c  129
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-buffer.h  32
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-calibr.c  276
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-calibr.h  37
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-ctl.c  897
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-ctl.h  162
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-ecc.c  853
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-ecc.h  84
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-format.c  344
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-format.h  37
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-init.c  161
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-init.h  43
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-io.c  992
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-io.h  90
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-proc.c  215
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-proc.h  35
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-read.c  621
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-read.h  51
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-rw.c  1092
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-rw.h  111
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-setup.c  105
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-tracing.c  118
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-tracing.h  180
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-write.c  336
-rw-r--r--  drivers/char/ftape/lowlevel/ftape-write.h  53
-rw-r--r--  drivers/char/ftape/lowlevel/ftape_syms.c  88
-rw-r--r--  drivers/char/ftape/zftape/Makefile  36
-rw-r--r--  drivers/char/ftape/zftape/zftape-buffers.c  149
-rw-r--r--  drivers/char/ftape/zftape/zftape-buffers.h  55
-rw-r--r--  drivers/char/ftape/zftape/zftape-ctl.c  1418
-rw-r--r--  drivers/char/ftape/zftape/zftape-ctl.h  59
-rw-r--r--  drivers/char/ftape/zftape/zftape-eof.c  199
-rw-r--r--  drivers/char/ftape/zftape/zftape-eof.h  52
-rw-r--r--  drivers/char/ftape/zftape/zftape-init.c  403
-rw-r--r--  drivers/char/ftape/zftape/zftape-init.h  77
-rw-r--r--  drivers/char/ftape/zftape/zftape-read.c  377
-rw-r--r--  drivers/char/ftape/zftape/zftape-read.h  53
-rw-r--r--  drivers/char/ftape/zftape/zftape-rw.c  376
-rw-r--r--  drivers/char/ftape/zftape/zftape-rw.h  102
-rw-r--r--  drivers/char/ftape/zftape/zftape-vtbl.c  757
-rw-r--r--  drivers/char/ftape/zftape/zftape-vtbl.h  227
-rw-r--r--  drivers/char/ftape/zftape/zftape-write.c  483
-rw-r--r--  drivers/char/ftape/zftape/zftape-write.h  38
-rw-r--r--  drivers/char/ftape/zftape/zftape_syms.c  43
-rw-r--r--  drivers/char/generic_nvram.c  145
-rw-r--r--  drivers/char/generic_serial.c  1001
-rw-r--r--  drivers/char/genrtc.c  535
-rw-r--r--  drivers/char/hangcheck-timer.c  129
-rw-r--r--  drivers/char/hpet.c  994
-rw-r--r--  drivers/char/hvc_console.c  831
-rw-r--r--  drivers/char/hvcs.c  1649
-rw-r--r--  drivers/char/hvsi.c  1320
-rw-r--r--  drivers/char/hw_random.c  630
-rw-r--r--  drivers/char/i8k.c  788
-rw-r--r--  drivers/char/ip2.c  110
-rw-r--r--  drivers/char/ip2/fip_firm.h  2149
-rw-r--r--  drivers/char/ip2/i2cmd.c  209
-rw-r--r--  drivers/char/ip2/i2cmd.h  643
-rw-r--r--  drivers/char/ip2/i2ellis.c  1487
-rw-r--r--  drivers/char/ip2/i2ellis.h  615
-rw-r--r--  drivers/char/ip2/i2hw.h  648
-rw-r--r--  drivers/char/ip2/i2lib.c  2219
-rw-r--r--  drivers/char/ip2/i2lib.h  351
-rw-r--r--  drivers/char/ip2/i2os.h  127
-rw-r--r--  drivers/char/ip2/i2pack.h  364
-rw-r--r--  drivers/char/ip2/ip2.h  107
-rw-r--r--  drivers/char/ip2/ip2ioctl.h  35
-rw-r--r--  drivers/char/ip2/ip2trace.h  42
-rw-r--r--  drivers/char/ip2/ip2types.h  57
-rw-r--r--  drivers/char/ip27-rtc.c  327
-rw-r--r--  drivers/char/ip2main.c  3265
-rw-r--r--  drivers/char/ipmi/Kconfig  67
-rw-r--r--  drivers/char/ipmi/Makefile  15
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c  513
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c  582
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c  500
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  3174
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c  549
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c  2359
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h  120
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c  599
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c  1068
-rw-r--r--  drivers/char/isicom.c  2079
-rw-r--r--  drivers/char/istallion.c  5276
-rw-r--r--  drivers/char/ite_gpio.c  419
-rw-r--r--  drivers/char/keyboard.c  1254
-rw-r--r--  drivers/char/lcd.c  683
-rw-r--r--  drivers/char/lcd.h  186
-rw-r--r--  drivers/char/lp.c  995
-rw-r--r--  drivers/char/mem.c  880
-rw-r--r--  drivers/char/misc.c  331
-rw-r--r--  drivers/char/mmtimer.c  725
-rw-r--r--  drivers/char/moxa.c  3243
-rw-r--r--  drivers/char/mwave/3780i.c  727
-rw-r--r--  drivers/char/mwave/3780i.h  362
-rw-r--r--  drivers/char/mwave/Makefile  15
-rw-r--r--  drivers/char/mwave/README  50
-rw-r--r--  drivers/char/mwave/mwavedd.c  674
-rw-r--r--  drivers/char/mwave/mwavedd.h  150
-rw-r--r--  drivers/char/mwave/mwavepub.h  89
-rw-r--r--  drivers/char/mwave/smapi.c  570
-rw-r--r--  drivers/char/mwave/smapi.h  80
-rw-r--r--  drivers/char/mwave/tp3780i.c  592
-rw-r--r--  drivers/char/mwave/tp3780i.h  103
-rw-r--r--  drivers/char/mxser.c  3170
-rw-r--r--  drivers/char/mxser.h  450
-rw-r--r--  drivers/char/n_hdlc.c  978
-rw-r--r--  drivers/char/n_r3964.c  1416
-rw-r--r--  drivers/char/n_tty.c  1562
-rw-r--r--  drivers/char/nvram.c  926
-rw-r--r--  drivers/char/nwbutton.c  248
-rw-r--r--  drivers/char/nwbutton.h  40
-rw-r--r--  drivers/char/nwflash.c  702
-rw-r--r--  drivers/char/pcmcia/Kconfig  22
-rw-r--r--  drivers/char/pcmcia/Makefile  7
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c  4611
-rw-r--r--  drivers/char/ppdev.c  824
-rw-r--r--  drivers/char/pty.c  412
-rw-r--r--  drivers/char/qtronix.c  601
-rw-r--r--  drivers/char/qtronixmap.c_shipped  265
-rw-r--r--  drivers/char/qtronixmap.map  287
-rw-r--r--  drivers/char/random.c  1629
-rw-r--r--  drivers/char/raw.c  342
-rw-r--r--  drivers/char/rio/Makefile  12
-rw-r--r--  drivers/char/rio/board.h  143
-rw-r--r--  drivers/char/rio/bootpkt.h  62
-rw-r--r--  drivers/char/rio/brates.h  107
-rw-r--r--  drivers/char/rio/chan.h  33
-rw-r--r--  drivers/char/rio/cirrus.h  463
-rw-r--r--  drivers/char/rio/cmd.h  84
-rw-r--r--  drivers/char/rio/cmdblk.h  60
-rw-r--r--  drivers/char/rio/cmdpkt.h  206
-rw-r--r--  drivers/char/rio/control.h  62
-rw-r--r--  drivers/char/rio/daemon.h  334
-rw-r--r--  drivers/char/rio/data.h  40
-rw-r--r--  drivers/char/rio/debug.h  39
-rw-r--r--  drivers/char/rio/defaults.h  59
-rw-r--r--  drivers/char/rio/eisa.h  104
-rw-r--r--  drivers/char/rio/enable.h  50
-rw-r--r--  drivers/char/rio/error.h  85
-rw-r--r--  drivers/char/rio/errors.h  104
-rw-r--r--  drivers/char/rio/formpkt.h  154
-rw-r--r--  drivers/char/rio/func.h  154
-rw-r--r--  drivers/char/rio/host.h  134
-rw-r--r--  drivers/char/rio/hosthw.h  57
-rw-r--r--  drivers/char/rio/link.h  188
-rw-r--r--  drivers/char/rio/linux_compat.h  122
-rw-r--r--  drivers/char/rio/list.h  196
-rw-r--r--  drivers/char/rio/lrt.h  55
-rw-r--r--  drivers/char/rio/ltt.h  55
-rw-r--r--  drivers/char/rio/lttwake.h  53
-rw-r--r--  drivers/char/rio/map.h  103
-rw-r--r--  drivers/char/rio/mca.h  73
-rw-r--r--  drivers/char/rio/mesg.h  41
-rw-r--r--  drivers/char/rio/param.h  61
-rw-r--r--  drivers/char/rio/parmmap.h  96
-rw-r--r--  drivers/char/rio/pci.h  76
-rw-r--r--  drivers/char/rio/phb.h  293
-rw-r--r--  drivers/char/rio/pkt.h  120
-rw-r--r--  drivers/char/rio/poll.h  76
-rw-r--r--  drivers/char/rio/port.h  245
-rw-r--r--  drivers/char/rio/proto.h  244
-rw-r--r--  drivers/char/rio/protsts.h  119
-rw-r--r--  drivers/char/rio/qbuf.h  67
-rw-r--r--  drivers/char/rio/rio.h  294
-rw-r--r--  drivers/char/rio/rio_linux.c  1380
-rw-r--r--  drivers/char/rio/rio_linux.h  187
-rw-r--r--  drivers/char/rio/rioboard.h  281
-rw-r--r--  drivers/char/rio/rioboot.c  1360
-rw-r--r--  drivers/char/rio/riocmd.c  1041
-rw-r--r--  drivers/char/rio/rioctrl.c  1869
-rw-r--r--  drivers/char/rio/riodrvr.h  144
-rw-r--r--  drivers/char/rio/rioinfo.h  96
-rw-r--r--  drivers/char/rio/rioinit.c  1617
-rw-r--r--  drivers/char/rio/riointr.c  951
-rw-r--r--  drivers/char/rio/rioioctl.h  103
-rw-r--r--  drivers/char/rio/riolocks.h  43
-rw-r--r--  drivers/char/rio/rioparam.c  744
-rw-r--r--  drivers/char/rio/riopcicopy.c  8
-rw-r--r--  drivers/char/rio/rioroute.c  1238
-rw-r--r--  drivers/char/rio/riospace.h  161
-rw-r--r--  drivers/char/rio/riotable.c  1044
-rw-r--r--  drivers/char/rio/riotime.h  63
-rw-r--r--  drivers/char/rio/riotty.c  1376
-rw-r--r--  drivers/char/rio/riotypes.h  135
-rw-r--r--  drivers/char/rio/riowinif.h  1335
-rw-r--r--  drivers/char/rio/riscos.h  63
-rw-r--r--  drivers/char/rio/rom.h  64
-rw-r--r--  drivers/char/rio/route.h  108
-rw-r--r--  drivers/char/rio/rtahw.h  75
-rw-r--r--  drivers/char/rio/rup.h  82
-rw-r--r--  drivers/char/rio/rupstat.h  51
-rw-r--r--  drivers/char/rio/sam.h  74
-rw-r--r--  drivers/char/rio/selftest.h  73
-rw-r--r--  drivers/char/rio/space.h  45
-rw-r--r--  drivers/char/rio/sysmap.h  63
-rw-r--r--  drivers/char/rio/timeouts.h  51
-rw-r--r--  drivers/char/rio/top.h  49
-rw-r--r--  drivers/char/rio/typdef.h  82
-rw-r--r--  drivers/char/rio/unixrup.h  56
-rw-r--r--  drivers/char/riscom8.c  1809
-rw-r--r--  drivers/char/riscom8.h  102
-rw-r--r--  drivers/char/riscom8_reg.h  254
-rw-r--r--  drivers/char/rocket.c  3299
-rw-r--r--  drivers/char/rocket.h  111
-rw-r--r--  drivers/char/rocket_int.h  1296
-rw-r--r--  drivers/char/rtc.c  1354
-rw-r--r--  drivers/char/s3c2410-rtc.c  588
-rw-r--r--  drivers/char/scan_keyb.c  149
-rw-r--r--  drivers/char/scan_keyb.h  15
-rw-r--r--  drivers/char/scc.h  613
-rw-r--r--  drivers/char/scx200_gpio.c  149
-rw-r--r--  drivers/char/selection.c  306
-rw-r--r--  drivers/char/ser_a2232.c  825
-rw-r--r--  drivers/char/ser_a2232.h  202
-rw-r--r--  drivers/char/ser_a2232fw.ax  529
-rw-r--r--  drivers/char/ser_a2232fw.h  306
-rw-r--r--  drivers/char/serial167.c  2858
-rw-r--r--  drivers/char/snsc.c  448
-rw-r--r--  drivers/char/snsc.h  50
-rw-r--r--  drivers/char/sonypi.c  1403
-rw-r--r--  drivers/char/specialix.c  2610
-rw-r--r--  drivers/char/specialix_io8.h  149
-rw-r--r--  drivers/char/stallion.c  5197
-rw-r--r--  drivers/char/sx.c  2621
-rw-r--r--  drivers/char/sx.h  202
-rw-r--r--  drivers/char/sxboards.h  206
-rw-r--r--  drivers/char/sxwindow.h  393
-rw-r--r--  drivers/char/synclink.c  8214
-rw-r--r--  drivers/char/synclinkmp.c  5671
-rw-r--r--  drivers/char/sysrq.c  432
-rw-r--r--  drivers/char/tb0219.c  347
-rw-r--r--  drivers/char/tipar.c  564
-rw-r--r--  drivers/char/toshiba.c  532
-rw-r--r--  drivers/char/tpm/Kconfig  39
-rw-r--r--  drivers/char/tpm/Makefile  7
-rw-r--r--  drivers/char/tpm/tpm.c  697
-rw-r--r--  drivers/char/tpm/tpm.h  93
-rw-r--r--  drivers/char/tpm/tpm_atmel.c  216
-rw-r--r--  drivers/char/tpm/tpm_nsc.c  373
-rw-r--r--  drivers/char/tty_io.c  2980
-rw-r--r--  drivers/char/tty_ioctl.c  551
-rw-r--r--  drivers/char/vc_screen.c  509
-rw-r--r--  drivers/char/viocons.c  1195
-rw-r--r--  drivers/char/viotape.c  1129
-rw-r--r--  drivers/char/vme_scc.c  1056
-rw-r--r--  drivers/char/vr41xx_rtc.c  709
-rw-r--r--  drivers/char/vt.c  3242
-rw-r--r--  drivers/char/vt_ioctl.c  1201
-rw-r--r--  drivers/char/watchdog/Kconfig  549
-rw-r--r--  drivers/char/watchdog/Makefile  42
-rw-r--r--  drivers/char/watchdog/acquirewdt.c  332
-rw-r--r--  drivers/char/watchdog/advantechwdt.c  333
-rw-r--r--  drivers/char/watchdog/alim1535_wdt.c  463
-rw-r--r--  drivers/char/watchdog/alim7101_wdt.c  421
-rw-r--r--  drivers/char/watchdog/cpu5wdt.c  303
-rw-r--r--  drivers/char/watchdog/eurotechwdt.c  474
-rw-r--r--  drivers/char/watchdog/i8xx_tco.c  535
-rw-r--r--  drivers/char/watchdog/i8xx_tco.h  42
-rw-r--r--  drivers/char/watchdog/ib700wdt.c  352
-rw-r--r--  drivers/char/watchdog/indydog.c  221
-rw-r--r--  drivers/char/watchdog/ixp2000_wdt.c  219
-rw-r--r--  drivers/char/watchdog/ixp4xx_wdt.c  230
-rw-r--r--  drivers/char/watchdog/machzwd.c  501
-rw-r--r--  drivers/char/watchdog/mixcomwd.c  306
-rw-r--r--  drivers/char/watchdog/mpc8xx_wdt.c  164
-rw-r--r--  drivers/char/watchdog/pcwd.c  926
-rw-r--r--  drivers/char/watchdog/pcwd_pci.c  677
-rw-r--r--  drivers/char/watchdog/pcwd_usb.c  796
-rw-r--r--  drivers/char/watchdog/s3c2410_wdt.c  516
-rw-r--r--  drivers/char/watchdog/sa1100_wdt.c  223
-rw-r--r--  drivers/char/watchdog/sbc60xxwdt.c  413
-rw-r--r--  drivers/char/watchdog/sc1200wdt.c  467
-rw-r--r--  drivers/char/watchdog/sc520_wdt.c  447
-rw-r--r--  drivers/char/watchdog/scx200_wdt.c  274
-rw-r--r--  drivers/char/watchdog/shwdt.c  452
-rw-r--r--  drivers/char/watchdog/softdog.c  309
-rw-r--r--  drivers/char/watchdog/w83627hf_wdt.c  362
-rw-r--r--  drivers/char/watchdog/w83877f_wdt.c  426
-rw-r--r--  drivers/char/watchdog/wafer5823wdt.c  330
-rw-r--r--  drivers/char/watchdog/wd501p.h  52
-rw-r--r--  drivers/char/watchdog/wdt.c  647
-rw-r--r--  drivers/char/watchdog/wdt285.c  229
-rw-r--r--  drivers/char/watchdog/wdt977.c  459
-rw-r--r--  drivers/char/watchdog/wdt_pci.c  763
439 files changed, 264246 insertions, 0 deletions
diff --git a/drivers/char/ChangeLog b/drivers/char/ChangeLog
new file mode 100644
index 00000000000..56b8a2e76ab
--- /dev/null
+++ b/drivers/char/ChangeLog
@@ -0,0 +1,775 @@
+2001-08-11 Tim Waugh <twaugh@redhat.com>
+
+ * serial.c (get_pci_port): Deal with awkward Titan cards.
+
+1998-08-26 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c (rs_open): Correctly decrement the module in-use count
+ on errors.
+
+Thu Feb 19 14:24:08 1998 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * tty_io.c (tty_name): Remove the non-reentrant (and non-SMP safe)
+ version of tty_name, and rename the reentrant _tty_name
+ function to be tty_name.
+ (tty_open): Add a warning message stating callout devices
+ are deprecated.
+
+Mon Dec 1 08:24:15 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * tty_io.c (tty_get_baud_rate): Print a warning syslog if the
+ tty->alt_speed kludge is used; this means the system is
+ using the deprecated SPD_HI ioctls.
+
+Mon Nov 24 10:37:49 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c, esp.c, rocket.c: Change drivers to take advantage of
+ tty_get_baud_rate().
+
+ * tty_io.c (tty_get_baud_rate): New function which computes the
+ correct baud rate for the tty. More factoring out of
+ common code out of the serial driver to the high-level tty
+ functions....
+
+Sat Nov 22 07:53:36 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c, esp.c, rocket.c: Add tty->driver.break() routine, and
+ allow high-level tty code to handle the break and soft
+ carrier ioctls.
+
+ * tty_ioctl.c (n_tty_ioctl): Support TIOCGSOFTCAR and
+ TIOCSSOFTCAR, so that device drivers don't have to support
+ it.
+
+ * serial.c (autoconfig): Change 16750 test to hopefully eliminate
+ false results by people with strange 16550As being
+ detected as 16750s. Hopefully 16750s will still be
+ detected as 16750, and other weird UARTs won't get poorly
+ autodetected. If this doesn't work, I'll have to disable
+ the auto identification for the 16750.
+
+ * tty_io.c (tty_hangup): Now actually do the tty hangup
+ processing during the timer processing, and disable
+ interrupts while doing the hangup processing. This avoids
+ several nasty race conditions which happened when the
+ hangup processing was done asynchronously.
+ (tty_ioctl): Do break handling in the tty driver if
+ driver's break function is supported.
+ (tty_flip_buffer_push): New exported function which should
+ be used by drivers to push characters in the flip buffer
+ to the tty handler. This may either be done using a task
+ queue function for better CPU efficiency, or directly for
+ low latency operation.
+
+ * serial.c (rs_set_termios): Fix bug rs_set_termios when
+ transitioning away from B0, submitted by Stanislav
+ Voronyi.
+
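The TIOCGSOFTCAR/TIOCSSOFTCAR handling mentioned above is visible to user space as a pair of simple ioctls; a minimal sketch follows (the /dev/ttyS0 path is only an illustration, not taken from the patch):

    /* Query and set the soft-carrier flag; setting it makes the port
     * behave as if CLOCAL were set. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    int main(void)
    {
            int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
            int flag;

            if (fd < 0)
                    return 1;
            if (ioctl(fd, TIOCGSOFTCAR, &flag) == 0)
                    printf("soft carrier is %s\n", flag ? "on" : "off");
            flag = 1;
            ioctl(fd, TIOCSSOFTCAR, &flag);
            close(fd);
            return 0;
    }
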
+Thu Jun 19 20:05:58 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c (begin_break, end_break, rs_ioctl): Applied patch
+ to support BSD ioctls to set and clear the break
+ condition explicitly.
+
+ * console.c (scrup, scrdown, insert_line, delete_line): Applied
+ fix suggested by Aaron Tiensivu to speed up block scrolls
+ up and down.
+
+ * n_tty.c (opost_block, write_chan): Added a modified "fast
+ console" patch which processes a block of text via
+ "cooking" efficiently.
+
+Wed Jun 18 15:25:50 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * tty_io.c (init_dev, release_dev): Applied fix suggested by Bill
+ Hawes to prevent race conditions in the tty code.
+
+ * n_tty.c (n_tty_chars_in_buffer): Applied fix suggested by Bill
+ Hawes so that n_tty_chars_in_buffer returns the correct
+ value in the case when the tty is in canonical mode. (To
+ avoid a pty deadlock with telnetd.)
+
+Thu Feb 27 01:53:08 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c (change_speed): Add support for the termios flag
+ CMSPAR, which allows the user to select stick parity.
+ (i.e., if PARODD is set, the parity bit is always 1; if
+ PARODD is not set, then the parity bit is always 0).
+
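A minimal user-space sketch of selecting stick parity through the CMSPAR flag described above; it assumes a Linux <termios.h> that exposes CMSPAR and an already-open descriptor fd:

    #include <termios.h>

    /* Request mark parity (parity bit always 1).  Clearing PARODD
     * instead gives space parity (parity bit always 0). */
    static int set_mark_parity(int fd)
    {
            struct termios tio;

            if (tcgetattr(fd, &tio) < 0)
                    return -1;
            tio.c_cflag |= PARENB | CMSPAR | PARODD;
            return tcsetattr(fd, TCSANOW, &tio);
    }
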
+Wed Feb 26 19:03:10 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c (cleanup_module): Fix memory leak when using the serial
+ driver as a module; make sure tmp_buf gets freed!
+
+Tue Feb 25 11:01:59 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c (set_modem_info): Add support for setting and clearing
+ the OUT1 and OUT2 bits. (For special case UART's, usually
+ for half-duplex.)
+ (autoconfig, change_speed): Fix TI 16750 support.
+
+Sun Feb 16 00:14:43 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * tty_io.c (release_dev): Add sanity check to make sure there are
+ no waiters on tty->read_wait or tty->write_wait.
+
+ * serial.c (rs_init): Don't autoconfig a device if the I/O region
+ is already reserved.
+
+ * serial.c (serial_proc_info): Add support for /proc/serial.
+
+Thu Feb 13 00:49:10 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c (receive_chars): When the UART reports an overrun
+ condition, it does so with a valid character. Changed to
+ not throw away the valid character, but instead report the
+ overrun after the valid character.
+
+ * serial.c: Added new #ifdef's for some of the advanced serial
+ driver features. A minimal driver that only supports COM
+ 1/2/3/4 without sharing serial interrupts only takes 17k;
+ the full driver takes 32k.
+
+Wed Feb 12 14:50:44 1997 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * vt.c:
+ * pty.c:
+ * tty_ioctl.c:
+ * serial.c: Update routines to use the new 2.1 memory access
+ routines.
+
+Wed Dec 4 07:51:52 1996 Theodore Ts'o <tytso@localhost.mit.edu>
+
+ * serial.c (change_speed): Use save_flags(); cli() and
+ restore_flags() in order to ensure we don't accidentally
+ turn on interrupts when starting up the port.
+ (startup): Move the insertion of serial structure into the
+ IRQ chain earlier into the startup processing. Interrupts
+ should be off this whole time, but we eventually will want
+ to reduce this window.
+
+Thu Nov 21 10:05:22 1996 Theodore Ts'o <tytso@localhost.mit.edu>
+
+ * tty_ioctl.c (tty_wait_until_sent): Always check the driver
+ wait_until_sent routine, even if there are no characters
+ in the xmit buffer. (There may be characters in the device
+ FIFO.)
+ (n_tty_ioctl): Add new flag tty->flow_stopped which
+ indicates whether the tty is stopped due to a request by
+ the TCXONC ioctl (used by tcflow). If so, don't let an
+ incoming XOFF character restart the tty. The tty can only
+ be restarted by another TCXONC request.
+
+ * tty_io.c (start_tty): Don't allow the tty to be restarted if
+ tty->flow_stopped is true.
+
+ * n_tty.c (n_tty_receive_char): If tty->flow_stopped is true, and
+ IXANY is set, don't eat a character trying to restart the
+ tty.
+
+ * serial.c (startup): Remove need for MCR_noint from the
+ async_struct structure. Only turn on DTR and RTS if the
+ baud rate is not zero.
+ (change_speed): More accurately calculate the timeout
+ value based on the word size. Move responsibility of
+ hangup when speed becomes B0 to rs_set_termios()
+ (set_serial_info): When changing the UART type set the
+ current xmit_fifo_size as well as the permanent
+ xmit_fifo_size.
+ (rs_ioctl): Fix TCSBRK (used by tcdrain) and TCSBRKP
+ ioctls to return EINTR if interrupted by a signal.
+ (rs_set_termios): If the baud rate changes to or from B0,
+ this function is now responsible for setting or clearing
+ DTR and RTS. DTR and RTS are only be changed on the
+ transition to or from the B0 state.
+ (rs_close): Wait for the characters to drain based on
+ info->timeout. At low baud rates (50 bps), it may take a
+ long time for the FIFO to completely drain out!
+ (rs_wait_until_sent): Fixed timeout handling. Now
+ releases control to the scheduler, but checks frequently
+ enough so that the function is sensitive enough to pass
+ the timing requirements of the NIST-PCTS.
+ (block_til_ready): When opening the device, don't turn on
+ DTR and RTS if the baud rate is B0.
+
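The flow_stopped behaviour described in the entry above can be exercised from user space with tcflow(), which issues TCXONC; a brief sketch (fd is assumed to be an open tty descriptor):

    #include <termios.h>
    #include <unistd.h>

    static void pause_then_resume_output(int fd)
    {
            tcflow(fd, TCOOFF);     /* stop output; enters the flow-stopped state */
            sleep(1);               /* an incoming XON will not restart output here */
            tcflow(fd, TCOON);      /* only another explicit request restarts it */
    }
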
+Thu Nov 14 00:06:09 1996 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c (autoconfig): Fix autoconfiguration problems;
+ info->flags wasn't getting initialized from the state
+ structure. Put in more paranoid test for the 16750.
+
+Fri Nov 8 20:19:50 1996 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * n_tty.c (n_tty_flush_buffer): Only call driver->unthrottle() if
+ the tty was previously throttled.
+ (n_tty_set_termios, write_chan): Add changes suggested by
+ Simon P. Allen to allow hardware cooking.
+
+ * tty_ioctl.c (set_termios): If we get a signal while waiting for
+ the tty to drain, return -EINTR.
+
+ * serial.c (change_speed): Add support for CREAD, as required by
+ POSIX.
+
+Sat Nov 2 20:43:10 1996 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * serial.c: Wholesale changes. Added support for the Startech
+ 16650 and 16650V2 chips. (WARNING: the new startech
+ 16650A may or may not work!) Added support for the
+ TI16750 (not yet tested). Split async_struct into a
+ transient part (async_struct) and a permanent part
+ (serial_state) which contains the configuration
+ information for the ports. Added new driver routines
+ wait_until_sent() and send_xchar() to help with POSIX
+ compliance. Added support for radio clocks which waggle
+ the carrier detect line (CONFIG_HARD_PPS).
+
+ * tty_ioctl.c (tty_wait_until_sent): Added call to new driver
+ function tty->driver.wait_until_sent(), which returns when
+ the tty's device xmit buffers are drained. Needed for
+ full POSIX compliance.
+
+ (send_prio_char): New function, called by the ioctl's
+ TCIOFF and TCION; uses the new driver call send_xchar(),
+ which will send the XON or XOFF character at high priority
+ (and even if tty output is stopped).
+
+Wed Jun 5 18:52:04 1996 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * pty.c (pty_close): When closing a pty, make sure packet mode is
+ cleared.
+
+Sun May 26 09:33:52 1996 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * vesa_blank.c (set_vesa_blanking): Add missing verify_area() call.
+
+ * selection.c (set_selection): Add missing verify_area() call.
+
+ * tty_io.c (tty_ioctl): Add missing verify_area() calls.
+
+ * serial.c (rs_ioctl): Add missing verify_area() calls.
+ (rs_init): Allow initialization of serial driver
+ configuration from a module.
+
+ * random.c (extract_entropy): Add missing verify_area call.
+ Don't limit number of characters returned to
+ 32,768. extract_entropy is no longer an inlined
+ function.
+
+ (random_read): Check return value in case extract_entropy
+ returns an error.
+
+ (secure_tcp_sequence_number): New function which returns a
+ secure TCP sequence number. This is needed to prevent some
+ nasty TCP hijacking attacks.
+
+ (init_std_data): Initialize using gettimeofday() instead of
+ struct timeval xtime.
+
+ (fast_add_entropy_word, add_entropy_word): Rename the
+ inline function add_entropy_word() to
+ fast_add_entropy_word(). Make add_entropy_word() be the
+ non-inlined function which is used in non-timing critical
+ places, in order to save space.
+
+ (initialize_benchmark, begin_benchmark, end_benchmark): New
+ functions defined when RANDOM_BENCHMARK is defined. They
+ allow us to benchmark the speed of the
+ add_timer_randomness() call.
+
+ (int_ln, rotate_left): Add two new inline functions with
+ i386 optimized asm instructions. This speeds up the
+ critical add_entropy_word() and add_timer_randomness()
+ functions, which are called from interrupt handlers.
+
+Tue May 7 22:51:11 1996 <tytso@rsts-11.mit.edu>
+
+ * random.c (add_timer_randomness): Limit the amount of randomness
+ that we estimate to 12 bits. (An arbitrary amount).
+
+ (extract_entropy): To make it harder to analyze the hash
+ function, fold the hash function in half using XOR, and
+ use the folded result as the value to emit to the user.
+ Also, add timer randomness each pass through the
+ extract_entropy call, to increase the amount of unknown
+ values during the extraction process.
+
+ (random_ioctl): Use IOR/IOW definitions to define the
+ ioctl values used by the /dev/random driver. Allow the
+ old ioctl values to be used for backwards compatibility
+ (for a limited amount of time).
+
+Wed Apr 24 14:02:04 1996 Theodore Ts'o <tytso@rsts-11.mit.edu>
+
+ * random.c (add_timer_randomness): Use 2nd derivative as well to
+ better estimate entropy.
+
+ (rand_initialize): Explicitly initialize all the pointers
+ to NULL. (Clearing pointers using memset isn't portable.)
+ Initialize the random pool with OS-dependent data.
+
+ (random_write): Add sanity checking to the arguments to
+ random_write(), so that bad arguments won't cause a kernel
+ SEGV.
+
+ (random_read): Update the access time of the device inode
+ when you return data to the user.
+
+ (random_ioctl): Wake up the random_wait channel when there
+ are only WAIT_INPUT_BITS available. Add more paranoia
+ checks to make sure entropy_count doesn't go beyond the
+ bounds of (0, POOLSIZE). Add a few missing verify_area
+ checks. Add support for the RNDCLEARPOOL ioctl, which
+ zaps the random pool.
+
+ (add_timer_randomness): Wake up the random_wait
+ channel only when there are WAIT_INPUT_BITS available.
+
+ (random_select): Allow a random refresh daemon process to
+ select on /dev/random for writing; wake up the daemon when
+ there are less than WAIT_OUTPUT_BITS bits of randomness
+ available.
+
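The /dev/random ioctl interface touched by the entries above can be probed from user space; a small sketch using RNDGETENTCNT (RNDCLEARPOOL, mentioned above, follows the same ioctl pattern but requires root):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/random.h>

    int main(void)
    {
            int fd = open("/dev/random", O_RDONLY);
            int count;

            if (fd < 0)
                    return 1;
            if (ioctl(fd, RNDGETENTCNT, &count) == 0)
                    printf("entropy available: %d bits\n", count);
            close(fd);
            return 0;
    }
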
+Tue Apr 23 22:56:07 1996 <tytso@rsts-11.mit.edu>
+
+ * tty_io.c (init_dev): Change return code when user attempts to
+ open master pty which is already open from EAGAIN to EIO,
+ to match with BSD expectations. EIO is more correct
+ anyway, since EAGAIN implies that retrying will be
+ successful --- which it might be.... Eventually!!
+
+ * pty.c (pty_open, pty_close): Fix wait loop so that we don't
+ busy loop while waiting for the master side to open.
+ Fix tty opening/closing logic. TTY_SLAVE_CLOSED was
+ renamed to TTY_OTHER_CLOSED, so that the name is more
+ descriptive. Also fixed code so that the tty flag
+ actually works correctly now....
+
+Mon Apr 1 10:22:01 1996 <tytso@rsts-11.mit.edu>
+
+ * serial.c (rs_close): Cleaned up modularization changes.
+ Remove code which forced line discipline back to N_TTY
+ this is done in the tty upper layers, and there's no
+ reason to do it here. (Making this change also
+ removed the requirement that the serial module access
+ the internal kernel symbol "ldiscs".)
+
+ * tty_io.c (tty_init): Formally register a tty_driver entry for
+ /dev/tty (device 4, 0) and /dev/console (device 5, 0).
+ This guarantees that major device numbers 4 and 5 will be
+ reserved for the tty subsystem (as they have to be because
+ of /dev/tty and /dev/console). Removed tty_regdev, as
+ this interface is no longer necessary.
+
+Sun Mar 17 20:42:47 GMT 1996 <ah@doc.ic.ac.uk>
+
+ * serial.c : modularisation (changes in linux/fs/device.c allow
+ kerneld to automatically load the serial module).
+
+ * Makefile, Config.in : serial modularisation adds.
+
+ * tty_io.c : tty_init_ctty used to register "cua" driver just
+ for the /dev/tty device (5,0). Added tty_regdev.
+
+ * serial.c (shutdown, rs_ioctl) : when the port shuts down, wake up processes
+ waiting on delta_msr_wait. The TIOCMIWAIT ioctl returns EIO
+ if no change was done since the time of call.
+
+Sat Mar 16 14:33:13 1996 <aeb@cwi.nl>
+
+ * tty_io.c (disassociate_ctty): If disassociate_ctty is called by
+ exit, do not perform an implicit vhangup on a pty.
+
+Fri Feb 9 14:15:47 1996 <tytso@rsts-11.mit.edu>
+
+ * serial.c (block_til_ready): Fixed another race condition which
+ happens if a hangup happens during the open.
+
+Wed Jan 10 10:08:00 1996 <tytso@rsts-11.mit.edu>
+
+ * serial.c (block_til_ready): Remove race condition which happened
+ if a hangup condition happened during the setup of the
+ UART, before rs_open() called block_til_ready(). This
+ caused the info->count counter to be erroneously
+ decremented.
+
+ * serial.c (startup, rs_open): Remove race condition that could
+ cause a memory leak of one page. (Fortunately, both race
+ conditions were relatively rare in practice.)
+
+Tue Dec 5 13:21:27 1995 <tytso@rsts-11.mit.edu>
+
+ * serial.c (check_modem_status, rs_ioctl): Support the new
+ ioctl()'s TIOCGICOUNT, TIOCMIWAIT. These allow an
+ application program to wait on a modem serial register
+ status bit change, and to find out how many changes have
+ taken place for the MSR bits.
+
+ (rs_write): Eliminate a race condition which is introduced
+ if it is necessary to wait for the semaphore.
+
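A hedged user-space sketch of the TIOCMIWAIT/TIOCGICOUNT interface added above: block until the carrier-detect line changes, then read the per-line counters (the device path is only an example):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>       /* struct serial_icounter_struct */

    int main(void)
    {
            int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
            struct serial_icounter_struct ic;

            if (fd < 0)
                    return 1;
            if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0 &&     /* sleep until DCD changes */
                ioctl(fd, TIOCGICOUNT, &ic) == 0)
                    printf("DCD transitions so far: %d\n", ic.dcd);
            close(fd);
            return 0;
    }
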
+Sat Nov 4 17:14:45 1995 <tytso@rsts-11.mit.edu>
+
+ * tty_io.c (tty_init): Move registration of TTY_MAJOR and
+ TTY_AUX_MAJOR to the end, so that /proc/devices looks
+ prettier.
+
+ * pty.c (pty_init): Use new major numbers for PTY master and slave
+ devices. This allows us to have more than 64 pty's. We
+ register the old pty devices for backwards compatibility.
+ Note that a system should either be using the old pty
+ devices or the new pty devices --- in general, it should
+ not try to use both, since they map into the same pty table.
+ The old pty devices are strictly for backwards compatibility.
+
+Wed Oct 11 12:45:24 1995 <tytso@rsts-11.mit.edu>
+
+ * tty_io.c (disassociate_ctty): If disassociate_ctty is called by
+ exit, perform an implicit vhangup on the tty.
+
+ * pty.c (pty_close): When the master pty is closed, send a hangup
+ to the slave pty.
+ (pty_open): Use the flag TTY_SLAVE_CLOSED to test to see
+ if there are any open slave ptys, instead of using
+ tty->link->count. The old method got confused if there
+ were processes that had hung-up file descriptors on the
+ slave tty.
+
+Tue May 2 00:53:25 1995 <tytso@rsx-11.mit.edu>
+
+ * tty_io.c (tty_set_ldisc): Wait until the output buffer is
+ drained before closing the old line discipline --- needed
+ in only one case: XON/XOFF processing.
+
+ * n_tty.c (n_tty_close): Don't bother waiting until the output
+ driver is closed; in general, the line discipline
+ shouldn't care if the hardware is finished
+ transmitting before the line discipline terminates.
+
+ * tty_io.c (release_dev): Shutdown the line discipline after
+ decrementing the tty count variable; but set the
+ TTY_CLOSING flag so that we know that this tty structure
+ isn't long for this world.
+
+ * tty_io.c (init_dev): Add sanity code to check to see if
+ TTY_CLOSING is set on a tty structure; if so, something
+ bad has happened (probably a line discipline close blocked
+ when it shouldn't have; so do a kernel printk and then
+ return an error).
+
+Wed Apr 26 10:23:44 1995 Theodore Y. Ts'o <tytso@localhost>
+
+ * tty_io.c (release_dev): Try to shutdown the line discipline
+ *before* decrementing the tty count variable; this removes
+ a potential race condition which occurs when the line
+ discipline close blocks, and another process then tries
+ to open the same serial port.
+
+ * serial.c (rs_hangup): When hanging up, flush the output buffer
+ before shutting down the UART. Otherwise the line
+ discipline close blocks waiting for the characters to get
+ flushed, which never happens until the serial port gets reused.
+
+Wed Apr 12 08:06:16 1995 Theodore Y. Ts'o <tytso@localhost>
+
+ * serial.c (do_serial_hangup, do_softint, check_modem_status,
+ rs_init): Hangups are now scheduled via a separate tqueue
+ structure in the async_struct structure, tqueue_hangup.
+ This task is pushed on to the tq_schedule queue, so that
+ it is processed synchronously by the scheduler.
+
+Sat Feb 18 12:13:51 1995 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_io.c (disassociate_ctty, tty_open, tty_ioctl): Clear
+ current->tty_old_pgrp field when a session leader
+ acquires a controlling tty, and after a session leader
+ has disassociated from a controlling tty.
+
+Fri Feb 17 09:34:09 1995 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_interrupt_single, rs_interrupt, rs_interrupt_multi):
+ Change the number of passes made from 64 to be 256,
+ configurable with the #define RS_ISR_PASS_LIMIT.
+
+ * serial.c (rs_init, set_serial_info, get_serial_info, rs_close):
+ Remove support for closing_wait2. Instead, set
+ tty->closing and rely on the line discipline to prevent
+ echo wars.
+
+ * n_tty.c (n_tty_receive_char): IEXTEN does not need to be
+ enabled in order for IXANY to be active.
+
+ If tty->closing is set, then only process XON and XOFF
+ characters.
+
+Sun Feb 12 23:57:48 1995 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_timer): Change the interrupt poll time from 60
+ seconds to 10 seconds, configurable with the #define
+ RS_STROBE_TIME.
+
+ * serial.c (rs_interrupt_multi, startup, shutdown, rs_ioctl,
+ set_multiport_struct, get_multiport_struct): Add
+ provisions for a new type of interrupt service routine,
+ which better supports multiple serial ports on a single
+ IRQ.
+
+Sun Feb 5 19:35:11 1995 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_ioctl.c (n_tty_ioctl, set_termios, tty_wait_until_sent):
+ * serial.c (rs_ioctl, rs_close):
+ * cyclades.c (cy_ioctl, cy_close):
+ * n_tty.c (n_tty_close): Rename wait_until_sent to
+ tty_wait_until_sent, so that it's a better name to export
+ in ksyms.c.
+
+Sat Feb 4 23:36:20 1995 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_close): Added missing check for closing_wait2 being
+ ASYNC_CLOSING_WAIT_NONE.
+
+Thu Jan 26 09:02:49 1995 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_init, set_serial_info, get_serial_info,
+ rs_close): Support close_wait in the serial driver.
+ This is helpful for slow devices (like serial
+ plotters) so that their outputs don't get flushed upon
+ device close. This has to be configurable because
+ normally we don't want ports to be hung up for long
+ periods of time during a close when they are not
+ connected to a device, or the device is powered off.
+
+ The default is to wait 30 seconds; in the case of a
+ very slow device, the close_wait timeout should be
+ lengthened. If it is set to 0, the kernel will wait
+ forever for all of the data to be transmitted.
+
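The close-wait interval discussed above is reachable from user space through the serial_struct ioctls; a sketch of lengthening it (the field is in hundredths of a second; device path and value are illustrative only):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    int main(void)
    {
            int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
            struct serial_struct ss;

            if (fd < 0)
                    return 1;
            if (ioctl(fd, TIOCGSERIAL, &ss) == 0) {
                    ss.closing_wait = 6000; /* allow up to 60 s for output to drain on close */
                    ioctl(fd, TIOCSSERIAL, &ss);
            }
            close(fd);
            return 0;
    }
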
+Thu Jan 17 01:17:20 1995 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (startup, change_speed, rs_init): Add support to detect
+ the StarTech 16650 chip. Treat it as a 16450 for now,
+ because of its FIFO bugs.
+
+Thu Jan 5 21:21:57 1995 <dahinds@users.sourceforge.net>
+
+ * serial.c: (receive_char): Added counter to prevent infinite loop
+ when a PCMCIA serial device is ejected.
+
+Thu Dec 29 17:53:48 1994 <tytso@rsx-11.mit.edu>
+
+ * tty_io.c (check_tty_count): New procedure which checks
+ tty->count to make sure that it matches with the number of
+ open file descriptors which point at the structure. If
+ the number doesn't match, it prints a warning message.
+
+Wed Dec 28 15:41:51 1994 <tytso@rsx-11.mit.edu>
+
+ * tty_io.c (do_tty_hangup, disassociate_ctty): At hangup time,
+ save the tty's current foreground process group in the
+ session leader's task structure. When the session leader
+ terminates, send a SIGHUP, SIGCONT to that process group.
+ This is not required by POSIX, but it's not prohibited
+ either, and it appears to be the least intrusive way
+ to fix a problem that dialup servers have with
+ orphaned process groups caused by modem hangups.
+
+Thu Dec 8 14:52:11 1994 <tytso@rsx-11.mit.edu>
+
+ * serial.c (rs_ioctl): Don't allow most ioctl's if the serial port
+ isn't initialized.
+
+ * serial.c (rs_close): Don't clear the IER if the serial port
+ isn't initialized.
+
+ * serial.c (block_til_ready): Don't try to block on the dialin
+ port if the serial port isn't initialized.
+
+Wed Dec 7 10:48:30 1994 Si Park (si@wimpol.demon.co.uk)
+ * tty_io.c (tty_register_driver): Fix bug when linking onto
+ the tty_drivers list. We now test that there are elements
+ already on the list before setting the back link from the
+ first element to the new driver.
+
+ * tty_io.c (tty_unregister_driver): Fix bug in unlinking the
+ specified driver from the tty_drivers list. We were not
+ setting the back link correctly. This used to result in
+ a dangling back link pointer and cause panics on the next
+ call to get_tty_driver().
+
+Tue Nov 29 10:21:09 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_io.c (tty_unregister_driver): Fix bug in
+ tty_unregister_driver where the pointer to the refcount is
+ tested, instead of the refcount itself. This caused
+ tty_unregister_driver to always return EBUSY.
+
+Sat Nov 26 11:59:24 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_io.c (tty_ioctl): Add support for the new ioctl
+ TIOCTTYGSTRUCT, which allow a kernel debugging program
+ direct read access to the tty and tty_driver structures.
+
+Fri Nov 25 17:26:22 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_set_termios): Don't wake up processes blocked in
+ open when the CLOCAL flag changes, since a blocking
+ open only samples the CLOCAL flag once when it blocks,
+ and doesn't check it again. (n.b. FreeBSD has a
+ different behavior for blocking opens; it's not clear
+ whether Linux or FreeBSD's interpretation is correct.
+ POSIX doesn't give clear guidance on this issue, so
+ this may change in the future....)
+
+ * serial.c (block_til_ready): Use the correct termios structure to
+ check the CLOCAL flag. If the cuaXX device is active,
+ then check the saved termios for the ttySXX device.
+ Otherwise, use the currently active termios structure.
+
+Sun Nov 6 21:05:44 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (change_speed): Add support for direct access of
+ 57,600 and 115,200 bps.
+
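With these rates supported directly, user space can simply request them through standard termios calls; a minimal sketch (fd is assumed to be an open serial port descriptor):

    #include <termios.h>

    static int set_115200(int fd)
    {
            struct termios tio;

            if (tcgetattr(fd, &tio) < 0)
                    return -1;
            cfsetispeed(&tio, B115200);     /* B57600 works the same way */
            cfsetospeed(&tio, B115200);
            return tcsetattr(fd, TCSANOW, &tio);
    }
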
+Wed Nov 2 10:32:36 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * n_tty.c (n_tty_receive_room): Only allow excess characters
+ through if we are in ICANON mode *and* there are no other
+ pending lines in the buffer. Otherwise cut and paste over
+ 4k breaks.
+
+Sat Oct 29 18:17:34 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_ioctl, get_lsr_info): Added patch suggested by Arne
+ Riiber so that user mode programs can tell when the
+ transmitter shift register is empty.
+
+Thu Oct 27 23:14:29 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_ioctl.c (wait_until_sent): Added debugging printk statements
+ (under the #ifdef TTY_DEBUG_WAIT_UNTIL_SENT)
+
+ * serial.c (rs_interrupt, rs_interrupt_single, receive_chars,
+ change_speed, rs_close): rs_close now disables receiver
+ interrupts when closing the serial port. This allows the
+ serial port to close quickly when Linux and a modem (or a
+ mouse) are engaged in an echo war; when closing the serial
+ port, we now first stop listening to incoming characters,
+ and *then* wait for the transmit buffer to drain.
+
+ In order to make this change, the info->read_status_mask
+ is now used to control what bits of the line status
+ register are looked at in the interrupt routine in all
+ cases; previously it was only used in receive_chars to
+ select a few of the status bits.
+
+Mon Oct 24 23:36:21 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_close): Add a timeout to the transmitter flush
+ loop; this is just a sanity check in case we have flaky
+ (or non-existent-but-configured-by-the-user) hardware.
+
+Fri Oct 21 09:37:23 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_io.c (tty_fasync): When asynchronous I/O is enabled, if the
+ process or process group has not been specified yet, set it
+ to be the tty's process group, or if that is not yet set,
+ to the current process's pid.
+
+Thu Oct 20 23:17:28 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * n_tty.c (n_tty_receive_room): If we are doing input
+ canonicalization, let as many characters through as
+ possible, so that the excess characters can be "beeped".
+
+Tue Oct 18 10:02:43 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_start): Removed an incorrect '!' that was
+ preventing transmit interrupts from being re-enabled in
+ rs_start(). Fortunately in most cases it would be
+ re-enabled elsewhere, but this still should be fixed
+ correctly.
+
+Sun Oct 9 23:46:03 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_io.c (do_tty_hangup): If the tty driver flags
+ TTY_DRIVER_RESET_TERMIOS is set, then reset the termios
+ settings back to the driver's initial configuration. This
+ allows the termios settings to be reset even if a process
+ has hung up file descriptors keeping a pty's termios from
+ being freed and reset.
+
+ * tty_io.c (release_dev): Fix memory leak. The pty's other
+ termios structure should also be freed.
+
+ * serial.c (rs_close, shutdown): Change how we wait for the
+ transmitter to completely drain before shutting down the
+ serial port. We now do it by scheduling in another
+ process instead of busy looping with the interrupts turned
+ on. This may eliminate some race condition problems that
+ some people seem to be reporting.
+
+Sun Sep 25 14:18:14 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_io.c (release_dev): When freeing a tty make sure that both
+ the tty and the o_tty (if present) aren't a process's
+ controlling tty. (Previously, we only checked the tty.)
+
+ * serial.c (change_speed): Only enable the Modem Status
+ Interrupt for a port if CLOCAL is not set or CRTSCTS
+ is set. If we're not checking the carrier detect and
+ CTS line, there's no point in enabling the modem
+ status interrupt. This will save spurious interrupts
+ from slowing down systems with terminals that
+ don't support either line. (Of course, if you want
+ only one of CD and CTS support, you will need a
+ properly wired serial cable.)
+
+Thu Sep 22 08:32:48 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_io.c (do_SAK): Return if tty is null.
+
+ * tty_io.c (_tty_name): Return "NULL tty" if the passed in tty is
+ NULL.
+
+Sat Sep 17 13:19:25 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_ioctl.c (n_tty_ioctl): Fix TIOCGLCKTRMIOS and
+ TIOCSLCKTRMIOS, which were totally broken. Remove
+ extra indirection from argument; it should be a struct
+ termios *, not a struct termios **.
+ &real_tty->termios_locked should have been
+ real_tty->termios_locked. This caused us to be
+ reading and writing the termios_locked structure to
+ random places in kernel memory.
+
+ * tty_io.c (release_dev): Oops! Forgot to delete a critical kfree
+ of the locked_termios. This leaves the locked_termios
+ structure pointed at a freed object.
+
+Fri Sep 16 08:13:25 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * tty_io.c (tty_open): Don't check for an exclusive open until
+ after the device specific open routine has been called.
+ Otherwise, the serial device ref counting will be screwed
+ up.
+
+ * serial.c (rs_open, block_til_ready): Don't set termios structure
+ until after block_til_ready has returned successfully.
+ Modify block_til_ready to check the normal_termios
+ structure directly, so it doesn't rely on termios being
+ set before it's called.
+
+Thu Sep 15 23:34:01 1994 Theodore Y. Ts'o (tytso@rt-11)
+
+ * serial.c (rs_close): Turn off interrupts during rs_close() to
+ prevent a race condition with the hangup code (which
+ runs during a software interrupt).
+
+ * tty_io.c (release_dev): Don't free the locked_termios structure;
+ its state must be retained across device opens.
+
+
+ * tty_io.c (tty_unregister_driver): Added function to unregister a
+ tty driver. (For loadable device drivers.)
+
+
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
new file mode 100644
index 00000000000..096a1202ea0
--- /dev/null
+++ b/drivers/char/Kconfig
@@ -0,0 +1,988 @@
+#
+# Character device configuration
+#
+
+menu "Character devices"
+
+config VT
+ bool "Virtual terminal" if EMBEDDED
+ select INPUT
+ default y if !VIOCONS
+ ---help---
+ If you say Y here, you will get support for terminal devices with
+ display and keyboard devices. These are called "virtual" because you
+ can run several virtual terminals (also called virtual consoles) on
+ one physical terminal. This is rather useful, for example one
+ virtual terminal can collect system messages and warnings, another
+ one can be used for a text-mode user session, and a third could run
+ an X session, all in parallel. Switching between virtual terminals
+ is done with certain key combinations, usually Alt-<function key>.
+
+ The setterm command ("man setterm") can be used to change the
+ properties (such as colors or beeping) of a virtual terminal. The
+ man page console_codes(4) ("man console_codes") contains the special
+ character sequences that can be used to change those properties
+ directly. The fonts used on virtual terminals can be changed with
+ the setfont ("man setfont") command and the key bindings are defined
+ with the loadkeys ("man loadkeys") command.
+
+ You need at least one virtual terminal device in order to make use
+ of your keyboard and monitor. Therefore, only people configuring an
+ embedded system would want to say N here in order to save some
+ memory; the only way to log into such a system is then via a serial
+ or network connection.
+
+ If unsure, say Y, or else you won't be able to do much with your new
+ shiny Linux system :-)
+
+config VT_CONSOLE
+ bool "Support for console on virtual terminal" if EMBEDDED
+ depends on VT
+ default y
+ ---help---
+ The system console is the device which receives all kernel messages
+ and warnings and which allows logins in single user mode. If you
+ answer Y here, a virtual terminal (the device used to interact with
+ a physical terminal) can be used as system console. This is the most
+ common mode of operations, so you should say Y here unless you want
+ the kernel messages be output only to a serial port (in which case
+ you should say Y to "Console on serial port", below).
+
+ If you do say Y here, by default the currently visible virtual
+ terminal (/dev/tty0) will be used as system console. You can change
+ that with a kernel command line option such as "console=tty3" which
+ would use the third virtual terminal as system console. (Try "man
+ bootparam" or see the documentation of your boot loader (lilo or
+ loadlin) about how to pass options to the kernel at boot time.)
+
+ If unsure, say Y.
+
+config HW_CONSOLE
+ bool
+ depends on VT && !S390 && !UML
+ default y
+
+config SERIAL_NONSTANDARD
+ bool "Non-standard serial port support"
+ ---help---
+ Say Y here if you have any non-standard serial boards -- boards
+ which aren't supported using the standard "dumb" serial driver.
+ This includes intelligent serial boards such as Cyclades,
+ Digiboards, etc. These are usually used for systems that need many
+ serial ports because they serve many terminals or dial-in
+ connections.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about non-standard serial boards.
+
+ Most people can say N here.
+
+config COMPUTONE
+ tristate "Computone IntelliPort Plus serial support"
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ ---help---
+ This driver supports the entire family of Intelliport II/Plus
+ controllers with the exception of the MicroChannel controllers and
+ products previous to the Intelliport II. These are multiport cards,
+ which give you many serial ports. You would need something like this
+ to connect more than two modems to your Linux box, for instance in
+ order to become a dial-in server. If you have a card like that, say
+ Y here and read <file:Documentation/computone.txt>.
+
+ To compile this driver as modules, choose M here: the
+ modules will be called ip2 and ip2main.
+
+config ROCKETPORT
+ tristate "Comtrol RocketPort support"
+ depends on SERIAL_NONSTANDARD
+ help
+ This driver supports Comtrol RocketPort and RocketModem PCI boards.
+ These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
+ modems. For information about the RocketPort/RocketModem boards
+ and this driver read <file:Documentation/rocket.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocket.
+
+ If you want to compile this driver into the kernel, say Y here. If
+ you don't have a Comtrol RocketPort/RocketModem card installed, say N.
+
+config CYCLADES
+ tristate "Cyclades async mux support"
+ depends on SERIAL_NONSTANDARD
+ ---help---
+ This driver supports Cyclades Z and Y multiserial boards.
+ You would need something like this to connect more than two modems to
+ your Linux box, for instance in order to become a dial-in server.
+
+ For information about the Cyclades-Z card, read
+ <file:drivers/char/README.cycladesZ>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cyclades.
+
+ If you haven't heard about it, it's safe to say N.
+
+config CYZ_INTR
+ bool "Cyclades-Z interrupt mode operation (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && CYCLADES
+ help
+ The Cyclades-Z family of multiport cards allows 2 (two) driver op
+ modes: polling and interrupt. In polling mode, the driver will check
+ the status of the Cyclades-Z ports every certain amount of time
+ (which is called polling cycle and is configurable). In interrupt
+ mode, it will use an interrupt line (IRQ) in order to check the
+ status of the Cyclades-Z ports. The default op mode is polling. If
+ unsure, say N.
+
+config DIGIEPCA
+ tristate "Digiboard Intelligent Async Support"
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ ---help---
+ This is a driver for Digi International's Xx, Xeve, and Xem series
+ of cards which provide multiple serial ports. You would need
+ something like this to connect more than two modems to your Linux
+ box, for instance in order to become a dial-in server. This driver
+ supports the original PC (ISA) boards as well as PCI and EISA. If
+ you have a card like this, say Y here and read the file
+ <file:Documentation/digiepca.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called epca.
+
+config ESPSERIAL
+ tristate "Hayes ESP serial port support"
+ depends on SERIAL_NONSTANDARD && ISA && BROKEN_ON_SMP
+ help
+ This is a driver which supports Hayes ESP serial ports. Both single
+ port cards and multiport cards are supported. Make sure to read
+ <file:Documentation/hayes-esp.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called esp.
+
+ If unsure, say N.
+
+config MOXA_INTELLIO
+ tristate "Moxa Intellio support"
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ help
+ Say Y here if you have a Moxa Intellio multiport serial card.
+
+ To compile this driver as a module, choose M here: the
+ module will be called moxa.
+
+config MOXA_SMARTIO
+ tristate "Moxa SmartIO support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Say Y here if you have a Moxa SmartIO multiport serial card.
+
+ This driver can also be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called mxser. If you want to do that, say M
+ here.
+
+config ISI
+ tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
+ depends on SERIAL_NONSTANDARD
+ help
+ This is a driver for the Multi-Tech cards which provide several
+ serial ports. The driver is experimental and can currently only be
+ built as a module. The module will be called isicom.
+ If you want to do that, choose M here.
+
+config SYNCLINK
+ tristate "Microgate SyncLink card support"
+ depends on SERIAL_NONSTANDARD && PCI
+ help
+ Provides support for the SyncLink ISA and PCI multiprotocol serial
+ adapters. These adapters support asynchronous and HDLC bit
+ synchronous communication up to 10Mbps (PCI adapter).
+
+ This driver can only be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called synclink. If you want to do that, say M
+ here.
+
+config SYNCLINKMP
+ tristate "SyncLink Multiport support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Enable support for the SyncLink Multiport (2 or 4 ports)
+ serial adapter, running asynchronous and HDLC communications up
+ to 2.048Mbps. Each port is independently selectable for
+ RS-232, V.35, RS-449, RS-530, and X.21.
+
+ This driver may be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called synclinkmp. If you want to do that, say M
+ here.
+
+config N_HDLC
+ tristate "HDLC line discipline support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Allows synchronous HDLC communications with tty device drivers that
+ support synchronous HDLC such as the Microgate SyncLink adapter.
+
+ This driver can only be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called n_hdlc. If you want to do that, say M
+ here.
+
+config RISCOM8
+ tristate "SDL RISCom/8 card support"
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ help
+ This is a driver for the SDL Communications RISCom/8 multiport card,
+ which gives you many serial ports. You would need something like
+ this to connect more than two modems to your Linux box, for instance
+ in order to become a dial-in server. If you have a card like that,
+ say Y here and read the file <file:Documentation/riscom8.txt>.
+
+ Also it's possible to say M here and compile this driver as a kernel
+ loadable module; the module will be called riscom8.
+
+config SPECIALIX
+ tristate "Specialix IO8+ card support"
+ depends on SERIAL_NONSTANDARD
+ help
+ This is a driver for the Specialix IO8+ multiport card (both the
+ ISA and the PCI version) which gives you many serial ports. You
+ would need something like this to connect more than two modems to
+ your Linux box, for instance in order to become a dial-in server.
+
+ If you have a card like that, say Y here and read the file
+ <file:Documentation/specialix.txt>. Also it's possible to say M here
+ and compile this driver as a kernel loadable module, which will be
+ called specialix.
+
+config SPECIALIX_RTSCTS
+ bool "Specialix DTR/RTS pin is RTS"
+ depends on SPECIALIX
+ help
+ The Specialix IO8+ card can only support either RTS or DTR. If you
+ say N here, the driver will use the pin as "DTR" when the tty is in
+ software handshake mode. If you say Y here or hardware handshake is
+ on, it will always be RTS. Read the file
+ <file:Documentation/specialix.txt> for more information.
+
+config SX
+ tristate "Specialix SX (and SI) card support"
+ depends on SERIAL_NONSTANDARD
+ help
+ This is a driver for the SX and SI multiport serial cards.
+ Please read the file <file:Documentation/sx.txt> for details.
+
+ This driver can only be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called sx. If you want to do that, say M here.
+
+config RIO
+ tristate "Specialix RIO system support"
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ help
+ This is a driver for the Specialix RIO, a smart serial card which
+ drives an outboard box that can support up to 128 ports. Product
+ information is at <http://www.perle.com/support/documentation.html#multiport>.
+ There are both ISA and PCI versions.
+
+config RIO_OLDPCI
+ bool "Support really old RIO/PCI cards"
+ depends on RIO
+ help
+ Older RIO PCI cards need some initialization-time configuration to
+ determine the IRQ and some control addresses. If you have a RIO and
+ this doesn't seem to work, try setting this to Y.
+
+config STALDRV
+ bool "Stallion multiport serial support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Stallion cards give you many serial ports. You would need something
+ like this to connect more than two modems to your Linux box, for
+ instance in order to become a dial-in server. If you say Y here,
+ you will be asked for your specific card model in the next
+ questions. Make sure to read <file:Documentation/stallion.txt> in
+ this case. If you have never heard about all this, it's safe to
+ say N.
+
+config STALLION
+ tristate "Stallion EasyIO or EC8/32 support"
+ depends on STALDRV && BROKEN_ON_SMP
+ help
+ If you have an EasyIO or EasyConnection 8/32 multiport Stallion
+ card, then this is for you; say Y. Make sure to read
+ <file:Documentation/stallion.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stallion.
+
+config ISTALLION
+ tristate "Stallion EC8/64, ONboard, Brumby support"
+ depends on STALDRV && BROKEN_ON_SMP
+ help
+ If you have an EasyConnection 8/64, ONboard, Brumby or Stallion
+ serial multiport card, say Y here. Make sure to read
+ <file:Documentation/stallion.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called istallion.
+
+config AU1000_UART
+ bool "Enable Au1000 UART Support"
+ depends on SERIAL_NONSTANDARD && MIPS
+ help
+ If you have an Alchemy AU1000 processor (MIPS based) and you want
+ to use serial ports, say Y. Otherwise, say N.
+
+config AU1000_SERIAL_CONSOLE
+ bool "Enable Au1000 serial console"
+ depends on AU1000_UART
+ help
+ If you have an Alchemy AU1000 processor (MIPS based) and you want
+ to use a console on a serial port, say Y. Otherwise, say N.
+
+config QTRONIX_KEYBOARD
+ bool "Enable Qtronix 990P Keyboard Support"
+ depends on IT8712
+ help
+ Images of Qtronix keyboards are at
+ <http://www.qtronix.com/keyboard.html>.
+
+config IT8172_CIR
+ bool
+ depends on QTRONIX_KEYBOARD
+ default y
+
+config IT8172_SCR0
+ bool "Enable Smart Card Reader 0 Support "
+ depends on IT8712
+ help
+ Say Y here to support smart-card reader 0 (SCR0) on the Integrated
+ Technology Express, Inc. ITE8172 SBC. Vendor page at
+ <http://www.ite.com.tw/ia/brief_it8172bsp.htm>; picture of the
+ board at <http://www.mvista.com/partners/semiconductor/ite.html>.
+
+config IT8172_SCR1
+ bool "Enable Smart Card Reader 1 Support "
+ depends on IT8712
+ help
+ Say Y here to support smart-card reader 1 (SCR1) on the Integrated
+ Technology Express, Inc. ITE8172 SBC. Vendor page at
+ <http://www.ite.com.tw/ia/brief_it8172bsp.htm>; picture of the
+ board at <http://www.mvista.com/partners/semiconductor/ite.html>.
+
+config A2232
+ tristate "Commodore A2232 serial support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
+ ---help---
+ This option supports the 2232 7-port serial card shipped with the
+ Amiga 2000 and other Zorro-bus machines, dating from 1989. At
+ a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
+ each, plus an 8520 CIA, and a master 6502 CPU and buffer as well. The
+ ports were connected with 8 pin DIN connectors on the card bracket,
+ for which 8 pin to DB25 adapters were supplied. The card also had
+ jumpers internally to toggle various pinning configurations.
+
+ This driver can be built as a module, but then "generic_serial"
+ will also be built as a module. It has to be loaded before
+ "ser_a2232". If you want to do this, answer M here.
+
+config SGI_SNSC
+ bool "SGI Altix system controller communication support"
+ depends on (IA64_SGI_SN2 || IA64_GENERIC)
+ help
+ If you have an SGI Altix and you want to enable system
+ controller communication from user space (you want this!),
+ say Y. Otherwise, say N.
+
+source "drivers/serial/Kconfig"
+
+config UNIX98_PTYS
+ bool "Unix98 PTY support" if EMBEDDED
+ default y
+ ---help---
+ A pseudo terminal (PTY) is a software device consisting of two
+ halves: a master and a slave. The slave device behaves identically to
+ a physical terminal; the master device is used by a process to
+ read data from and write data to the slave, thereby emulating a
+ terminal. Typical programs for the master side are telnet servers
+ and xterms.
+
+ Linux has traditionally used the BSD-like names /dev/ptyxx for
+ masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
+ has a number of problems. The GNU C library glibc 2.1 and later,
+ however, supports the Unix98 naming standard: in order to acquire a
+ pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
+ terminal is then made available to the process and the pseudo
+ terminal slave can be accessed as /dev/pts/<number>. What was
+ traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
+
+ All modern Linux systems use the Unix98 ptys. Say Y unless
+ you're on an embedded system and want to conserve memory.
+
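As an aside, the /dev/ptmx handshake described above maps onto a handful of libc calls; the following is only a minimal user-space sketch (not part of this patch), assuming glibc 2.1 or later:

#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>

int main(void)
{
	/* Opening /dev/ptmx hands back the master of a fresh pty pair. */
	int master = open("/dev/ptmx", O_RDWR | O_NOCTTY);

	if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0) {
		perror("/dev/ptmx");
		return 1;
	}
	/* The slave appears as /dev/pts/<number>, e.g. /dev/pts/2. */
	printf("slave side: %s\n", ptsname(master));
	return 0;
}
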
+config LEGACY_PTYS
+ bool "Legacy (BSD) PTY support"
+ default y
+ ---help---
+ A pseudo terminal (PTY) is a software device consisting of two
+ halves: a master and a slave. The slave device behaves identically to
+ a physical terminal; the master device is used by a process to
+ read data from and write data to the slave, thereby emulating a
+ terminal. Typical programs for the master side are telnet servers
+ and xterms.
+
+ Linux has traditionally used the BSD-like names /dev/ptyxx
+ for masters and /dev/ttyxx for slaves of pseudo
+ terminals. This scheme has a number of problems, including
+ security. This option enables these legacy devices; on most
+ systems, it is safe to say N.
+
+
+config LEGACY_PTY_COUNT
+ int "Maximum number of legacy PTY in use"
+ depends on LEGACY_PTYS
+ range 1 256
+ default "256"
+ ---help---
+ The maximum number of legacy PTYs that can be used at any one time.
+ The default is 256, and should be more than enough. Embedded
+ systems may want to reduce this to save memory.
+
+ When not in use, each legacy PTY occupies 12 bytes on 32-bit
+ architectures and 24 bytes on 64-bit architectures.
+
+config PRINTER
+ tristate "Parallel printer support"
+ depends on PARPORT
+ ---help---
+ If you intend to attach a printer to the parallel port of your Linux
+ box (as opposed to using a serial printer; if the connector at the
+ printer has 9 or 25 holes ["female"], then it's serial), say Y.
+ Also read the Printing-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ It is possible to share one parallel port among several devices
+ (e.g. printer and ZIP drive) and it is safe to compile the
+ corresponding drivers into the kernel.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/parport.txt>. The module will be called lp.
+
+ If you have several parallel ports, you can specify which ports to
+ use with the "lp" kernel command line option. (Try "man bootparam"
+ or see the documentation of your boot loader (lilo or loadlin) about
+ how to pass options to the kernel at boot time.) The syntax of the
+ "lp" command line option can be found in <file:drivers/char/lp.c>.
+
+ If you have more than 8 printers, you need to increase the LP_NO
+ macro in lp.c and the PARPORT_MAX macro in parport.h.
+
+config LP_CONSOLE
+ bool "Support for console on line printer"
+ depends on PRINTER
+ ---help---
+ If you want kernel messages to be printed out as they occur, you
+ can have a console on the printer. This option adds support for
+ doing that; to actually get it to happen you need to pass the
+ option "console=lp0" to the kernel at boot time.
+
+ If the printer is out of paper (or off, or unplugged, or too
+ busy..) the kernel will stall until the printer is ready again.
+ By defining CONSOLE_LP_STRICT to 0 (at your own risk) you
+ can make the kernel continue when this happens,
+ but it'll lose the kernel messages.
+
+ If unsure, say N.
+
+config PPDEV
+ tristate "Support for user-space parallel port device drivers"
+ depends on PARPORT
+ ---help---
+ Saying Y to this adds support for /dev/parport device nodes. This
+ is needed for programs that want portable access to the parallel
+ port, for instance deviceid (which displays Plug-and-Play device
+ IDs).
+
+ This is the parallel port equivalent of SCSI generic support (sg).
+ It is safe to say N to this -- it is not needed for normal printing
+ or parallel port CD-ROM/disk support.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ppdev.
+
+ If unsure, say N.
+
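For illustration only, user-space code normally drives a /dev/parport node through the ioctls in <linux/ppdev.h>; a rough sketch, assuming /dev/parport0 exists and the port is free:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ppdev.h>

int main(void)
{
	unsigned char byte = 0x55;
	int fd = open("/dev/parport0", O_RDWR);

	if (fd < 0 || ioctl(fd, PPCLAIM) < 0) {	/* claim the port first */
		perror("/dev/parport0");
		return 1;
	}
	ioctl(fd, PPWDATA, &byte);		/* drive the data lines */
	ioctl(fd, PPRELEASE);			/* release for other users */
	close(fd);
	return 0;
}
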
+config TIPAR
+ tristate "Texas Instruments parallel link cable support"
+ depends on PARPORT
+ ---help---
+ If you own a Texas Instruments graphing calculator and use a
+ parallel link cable, then you might be interested in this driver.
+
+ If you enable this driver, you will be able to communicate with
+ your calculator through a set of device nodes under /dev. The
+ main advantage of this driver is that you don't have to be root
+ to use this particular link cable (depending on the permissions on
+ the device nodes, though).
+
+ To compile this driver as a module, choose M here: the
+ module will be called tipar.
+
+ If you don't know what a parallel link cable is or what a Texas
+ Instruments graphing calculator is, then you probably don't need this
+ driver.
+
+ If unsure, say N.
+
+config HVC_CONSOLE
+ bool "pSeries Hypervisor Virtual Console support"
+ depends on PPC_PSERIES
+ help
+ Partitioned pSeries machines support a hypervisor virtual
+ console. This driver allows each pSeries partition to have a console
+ which is accessed via the HMC.
+
+config HVCS
+ tristate "IBM Hypervisor Virtual Console Server support"
+ depends on PPC_PSERIES
+ help
+ Partitionable IBM Power5 ppc64 machines allow hosting of
+ firmware virtual consoles from one Linux partition by
+ another Linux partition. This driver allows console data
+ from Linux partitions to be accessed through TTY device
+ interfaces in the device tree of a Linux partition running
+ this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hvcs.ko. Additionally, this module
+ will depend on arch specific APIs exported from hvcserver.ko
+ which will also be compiled when this driver is built as a
+ module.
+
+source "drivers/char/ipmi/Kconfig"
+
+source "drivers/char/watchdog/Kconfig"
+
+config DS1620
+ tristate "NetWinder thermometer support"
+ depends on ARCH_NETWINDER
+ help
+ Say Y here to include support for the thermal management hardware
+ found in the NetWinder. This driver allows the user to control the
+ temperature set points and to read the current temperature.
+
+ It is also possible to say M here to build it as a module (ds1620).
+ It is recommended for use on a NetWinder, but it is not a
+ necessity.
+
+config NWBUTTON
+ tristate "NetWinder Button"
+ depends on ARCH_NETWINDER
+ ---help---
+ If you say Y here and create a character device node /dev/nwbutton
+ with major and minor numbers 10 and 158 ("man mknod"), then every
+ time the orange button is pressed a number of times, the press
+ count will be written to that device.
+
+ This is most useful for applications, as yet unwritten, which
+ perform actions based on how many times the button is pressed in a
+ row.
+
+ Do not hold the button down for too long, as the driver does not
+ alter the behaviour of the hardware reset circuitry attached to the
+ button; it will still execute a hard reset if the button is held
+ down for longer than approximately five seconds.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nwbutton.
+
+ Most people will answer Y to this question and "Reboot Using Button"
+ below to be able to initiate a system shutdown from the button.
+
+config NWBUTTON_REBOOT
+ bool "Reboot Using Button"
+ depends on NWBUTTON
+ help
+ If you say Y here, then you will be able to initiate a system
+ shutdown and reboot by pressing the orange button a number of times.
+ The number of presses to initiate the shutdown is two by default,
+ but this can be altered by modifying the value of NUM_PRESSES_REBOOT
+ in nwbutton.h and recompiling the driver or, if you compile the
+ driver as a module, you can specify the number of presses at load
+ time with "insmod button reboot_count=<something>".
+
+config NWFLASH
+ tristate "NetWinder flash support"
+ depends on ARCH_NETWINDER
+ ---help---
+ If you say Y here and create a character device /dev/flash with
+ major 10 and minor 160 you can manipulate the flash ROM containing
+ the NetWinder firmware. Be careful as accidentally overwriting the
+ flash contents can render your computer unbootable. On no account
+ allow random users access to this device. :-)
+
+ To compile this driver as a module, choose M here: the
+ module will be called nwflash.
+
+ If you're not sure, say N.
+
+config HW_RANDOM
+ tristate "Intel/AMD/VIA HW Random Number Generator support"
+ depends on (X86 || IA64) && PCI
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Intel i8xx-based motherboards,
+ AMD 76x-based motherboards, and Via Nehemiah CPUs.
+
+ Provides a character driver, used to read() entropy data.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hw_random.
+
+ If unsure, say N.
+
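Since the help text above says entropy is consumed with plain read() calls, here is a small sketch of such a consumer (not part of this patch; the node name /dev/hw_random, character device 10,183, is an assumption of this example):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	unsigned int i;
	int fd = open("/dev/hw_random", O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf)) != sizeof(buf)) {
		perror("/dev/hw_random");
		return 1;
	}
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);	/* dump 16 bytes of hardware entropy */
	printf("\n");
	close(fd);
	return 0;
}
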
+config NVRAM
+ tristate "/dev/nvram support"
+ depends on ATARI || X86 || X86_64 || ARM || GENERIC_NVRAM
+ ---help---
+ If you say Y here and create a character special file /dev/nvram
+ with major number 10 and minor number 144 using mknod ("man mknod"),
+ you get read and write access to the extra bytes of non-volatile
+ memory in the real time clock (RTC), which is contained in every PC
+ and most Ataris. The actual number of bytes varies, depending on the
+ nvram in the system, but is usually 114 (128-14 for the RTC).
+
+ This memory is conventionally called "CMOS RAM" on PCs and "NVRAM"
+ on Ataris. /dev/nvram may be used to view settings there, or to
+ change them (with some utility). It could also be used to frequently
+ save a few bits of very important data that must not be lost over
+ power-off and for which writing to disk is too insecure. Note
+ however that most NVRAM space in a PC belongs to the BIOS and you
+ should NEVER idly tamper with it. See Ralf Brown's interrupt list
+ for a guide to the use of CMOS bytes by your BIOS.
+
+ On Atari machines, /dev/nvram is always configured and does not need
+ to be selected.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvram.
+
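The access pattern the help text describes (create the node, then read the CMOS bytes) boils down to something like the sketch below; it is illustrative only, and the 128-byte buffer simply needs to be at least as large as the NVRAM of the machine:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
	unsigned char cmos[128];
	ssize_t n;
	int fd;

	/* Equivalent of: mknod /dev/nvram c 10 144 (EEXIST is harmless). */
	mknod("/dev/nvram", S_IFCHR | 0600, makedev(10, 144));

	fd = open("/dev/nvram", O_RDONLY);
	if (fd < 0) {
		perror("/dev/nvram");
		return 1;
	}
	n = read(fd, cmos, sizeof(cmos));	/* usually 114 bytes on a PC */
	printf("read %zd bytes of NVRAM\n", n);
	close(fd);
	return 0;
}
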
+config RTC
+ tristate "Enhanced Real Time Clock Support"
+ depends on !PPC32 && !PARISC && !IA64 && !M68K
+ ---help---
+ If you say Y here and create a character special file /dev/rtc with
+ major number 10 and minor number 135 using mknod ("man mknod"), you
+ will get access to the real time clock (or hardware clock) built
+ into your computer.
+
+ Every PC has such a clock built in. It can be used to generate
+ signals from as low as 1Hz up to 8192Hz, and can also be used
+ as a 24 hour alarm. It reports status information via the file
+ /proc/driver/rtc and its behaviour is set by various ioctls on
+ /dev/rtc.
+
+ If you run Linux on a multiprocessor machine and said Y to
+ "Symmetric Multi Processing" above, you should say Y here to read
+ and set the RTC in an SMP compatible fashion.
+
+ If you think you have a use for such a device (such as periodic data
+ sampling), then say Y here, and read <file:Documentation/rtc.txt>
+ for details.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rtc.
+
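The ioctls mentioned above are declared in <linux/rtc.h>; reading the hardware clock, for example, is a single RTC_RD_TIME call. A minimal sketch (not part of this patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		perror("/dev/rtc");
		return 1;
	}
	/* tm_year counts from 1900 and tm_mon from 0, as in struct tm. */
	printf("RTC: %04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}
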
+config SGI_DS1286
+ tristate "SGI DS1286 RTC support"
+ depends on SGI_IP22
+ help
+ If you say Y here and create a character special file /dev/rtc with
+ major number 10 and minor number 135 using mknod ("man mknod"), you
+ will get access to the real time clock built into your computer.
+ Every SGI has such a clock built in. It reports status information
+ via the file /proc/rtc and its behaviour is set by various ioctls on
+ /dev/rtc.
+
+config SGI_IP27_RTC
+ bool "SGI M48T35 RTC support"
+ depends on SGI_IP27
+ help
+ If you say Y here and create a character special file /dev/rtc with
+ major number 10 and minor number 135 using mknod ("man mknod"), you
+ will get access to the real time clock built into your computer.
+ Every SGI has such a clock built in. It reports status information
+ via the file /proc/rtc and its behaviour is set by various ioctls on
+ /dev/rtc.
+
+config GEN_RTC
+ tristate "Generic /dev/rtc emulation"
+ depends on RTC!=y && !IA64 && !ARM
+ ---help---
+ If you say Y here and create a character special file /dev/rtc with
+ major number 10 and minor number 135 using mknod ("man mknod"), you
+ will get access to the real time clock (or hardware clock) built
+ into your computer.
+
+ It reports status information via the file /proc/driver/rtc and its
+ behaviour is set by various ioctls on /dev/rtc. If you enable the
+ "extended RTC operation" below it will also provide an emulation
+ for RTC_UIE which is required by some programs and may improve
+ precision in some cases.
+
+ To compile this driver as a module, choose M here: the
+ module will be called genrtc.
+
+config GEN_RTC_X
+ bool "Extended RTC operation"
+ depends on GEN_RTC
+ help
+ Provides an emulation for RTC_UIE which is required by some programs
+ and may improve precision of the generic RTC support in some cases.
+
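RTC_UIE, referred to in the two entries above, is normally used by switching update interrupts on and then blocking in read(); a short sketch of that pattern, illustrative only:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_UIE_ON, 0) < 0) {
		perror("RTC_UIE_ON");
		return 1;
	}
	/* The read completes on the next once-per-second update interrupt;
	 * the low byte of 'data' carries the interrupt flags. */
	if (read(fd, &data, sizeof(data)) == sizeof(data))
		printf("update interrupt received (0x%lx)\n", data);
	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}
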
+config EFI_RTC
+ bool "EFI Real Time Clock Services"
+ depends on IA64
+
+config DS1302
+ tristate "DS1302 RTC support"
+ depends on M32R && (PLAT_M32700UT || PLAT_OPSPUT)
+ help
+ If you say Y here and create a character special file /dev/rtc with
+ major number 121 and minor number 0 using mknod ("man mknod"), you
+ will get access to the real time clock (or hardware clock) built
+ into your computer.
+
+config S3C2410_RTC
+ bool "S3C2410 RTC Driver"
+ depends on ARCH_S3C2410
+ help
+ RTC (Realtime Clock) driver for the clock built into the
+ Samsung S3C2410. This can provide periodic interrupt rates
+ from 1Hz to 64Hz for user programs, and wakeup from alarm.
+
+config RTC_VR41XX
+ tristate "NEC VR4100 series Real Time Clock Support"
+ depends on CPU_VR41XX
+
+config COBALT_LCD
+ bool "Support for Cobalt LCD"
+ depends on MIPS_COBALT
+ help
+ This option enables support for the LCD display and buttons found
+ on Cobalt systems through a misc device.
+
+config DTLK
+ tristate "Double Talk PC internal speech card support"
+ help
+ This driver is for the DoubleTalk PC, a speech synthesizer
+ manufactured by RC Systems (<http://www.rcsys.com/>). It is also
+ called the `internal DoubleTalk'.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dtlk.
+
+config R3964
+ tristate "Siemens R3964 line discipline"
+ ---help---
+ This driver allows synchronous communication with devices using the
+ Siemens R3964 packet protocol. Unless you are dealing with special
+ hardware like PLCs, you are unlikely to need this.
+
+ To compile this driver as a module, choose M here: the
+ module will be called n_r3964.
+
+ If unsure, say N.
+
+config APPLICOM
+ tristate "Applicom intelligent fieldbus card support"
+ depends on PCI
+ ---help---
+ This driver provides the kernel-side support for the intelligent
+ fieldbus cards made by Applicom International. More information
+ about these cards can be found on the WWW at the address
+ <http://www.applicom-int.com/>, or by email from David Woodhouse
+ <dwmw2@infradead.org>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called applicom.
+
+ If unsure, say N.
+
+config SONYPI
+ tristate "Sony Vaio Programmable I/O Control Device support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && X86 && PCI && INPUT && !64BIT
+ ---help---
+ This driver enables access to the Sony Programmable I/O Control
+ Device which can be found in many (possibly all) Sony Vaio laptops.
+
+ If you have one of those laptops, read
+ <file:Documentation/sonypi.txt>, and say Y or M here.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sonypi.
+
+config TANBAC_TB0219
+ tristate "TANBAC TB0219 base board support"
+ depends on TANBAC_TB0229
+
+
+menu "Ftape, the floppy tape device driver"
+
+config FTAPE
+ tristate "Ftape (QIC-80/Travan) support"
+ depends on BROKEN_ON_SMP && (ALPHA || X86)
+ ---help---
+ If you have a tape drive that is connected to your floppy
+ controller, say Y here.
+
+ Some tape drives (like the Seagate "Tape Store 3200" or the Iomega
+ "Ditto 3200" or the Exabyte "Eagle TR-3") come with a "high speed"
+ controller of their own. These drives (and their companion
+ controllers) are also supported if you say Y here.
+
+ If you have a special controller (such as the CMS FC-10, FC-20,
+ Mountain Mach-II, or any controller that is based on the Intel 82078
+ FDC like the high speed controllers by Seagate and Exabyte and
+ Iomega's "Ditto Dash") you must configure it by selecting the
+ appropriate entries from the "Floppy tape controllers" sub-menu
+ below and possibly modify the default values for the IRQ and DMA
+ channel and the IO base in ftape's configuration menu.
+
+ If you want to use your floppy tape drive on a PCI-bus based system,
+ please read the file <file:drivers/char/ftape/README.PCI>.
+
+ The ftape kernel driver is also available as a runtime loadable
+ module. To compile this driver as a module, choose M here: the
+ module will be called ftape.
+
+ Note that the Ftape-HOWTO is out of date (sorry) and documents the
+ older version 2.08 of this software but still contains useful
+ information. There is a web page with more recent documentation at
+ <http://www.instmath.rwth-aachen.de/~heine/ftape/>. This page
+ always contains the latest release of the ftape driver and useful
+ information (backup software, ftape related patches and
+ documentation, FAQ). Note that the file system interface has
+ changed quite a bit compared to previous versions of ftape. Please
+ read <file:Documentation/ftape.txt>.
+
+source "drivers/char/ftape/Kconfig"
+
+endmenu
+
+source "drivers/char/agp/Kconfig"
+
+source "drivers/char/drm/Kconfig"
+
+source "drivers/char/pcmcia/Kconfig"
+
+config MWAVE
+ tristate "ACP Modem (Mwave) support"
+ depends on X86
+ select SERIAL_8250
+ ---help---
+ The ACP modem (Mwave) for Linux is a WinModem. It is composed of a
+ kernel driver and a user level application. Together these components
+ support direct attachment to public switched telephone networks (PSTNs)
+ and are supported in selected countries worldwide.
+
+ This version of the ACP Modem driver supports the IBM Thinkpad 600E,
+ 600, and 770 that include on board ACP modem hardware.
+
+ The modem also supports the standard communications port interface
+ (ttySx) and is compatible with the Hayes AT Command Set.
+
+ The user level application needed to use this driver can be found at
+ the IBM Linux Technology Center (LTC) web site:
+ <http://www.ibm.com/linux/ltc/>.
+
+ If you own one of the above IBM Thinkpads which has the Mwave chipset
+ in it, say Y.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mwave.
+
+config SCx200_GPIO
+ tristate "NatSemi SCx200 GPIO Support"
+ depends on SCx200
+ help
+ Give userspace access to the GPIO pins on the National
+ Semiconductor SCx200 processors.
+
+ If compiled as a module, it will be called scx200_gpio.
+
+config RAW_DRIVER
+ tristate "RAW driver (/dev/raw/rawN) (OBSOLETE)"
+ help
+ The raw driver permits block devices to be bound to /dev/raw/rawN.
+ Once bound, I/O against /dev/raw/rawN uses efficient zero-copy semantics.
+ See the raw(8) manpage for more details.
+
+ The raw driver is deprecated and may be removed from 2.7
+ kernels. Applications should simply open the device (e.g. /dev/hda1)
+ with the O_DIRECT flag.
+
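The O_DIRECT alternative recommended above looks roughly like the sketch below; note that O_DIRECT transfers need an aligned buffer, and 512-byte alignment with a 4 KB read is only an assumption made here for simplicity:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/dev/hda1", O_RDONLY | O_DIRECT);

	if (fd < 0) {
		perror("open O_DIRECT");
		return 1;
	}
	/* Direct I/O buffers must be aligned to the device block size. */
	if (posix_memalign(&buf, 512, 4096) != 0) {
		close(fd);
		return 1;
	}
	if (read(fd, buf, 4096) < 0)
		perror("read");
	free(buf);
	close(fd);
	return 0;
}
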
+config HPET
+ bool "HPET - High Precision Event Timer" if (X86 || IA64)
+ default n
+ depends on ACPI
+ help
+ If you say Y here, you will have a miscdevice named "/dev/hpet/". Each
+ open selects one of the timers supported by the HPET. The timers may
+ be non-periodic and/or periodic.
+
+config HPET_RTC_IRQ
+ bool "HPET Control RTC IRQ" if !HPET_EMULATE_RTC
+ default n
+ depends on HPET
+ help
+ If you say Y here, you will disable RTC_IRQ in drivers/char/rtc.c. It
+ is assumed the platform called hpet_alloc with the RTC IRQ values for
+ the HPET timers.
+
+config HPET_MMAP
+ bool "Allow mmap of HPET"
+ default y
+ depends on HPET
+ help
+ If you say Y here, user applications will be able to mmap
+ the HPET registers.
+
+ In some hardware implementations, the page containing HPET
+ registers may also contain other things that shouldn't be
+ exposed to the user. If this applies to your hardware,
+ say N here.
+
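Mapping the registers from user space is then an ordinary mmap() of the device node; a sketch under the assumptions that the node is /dev/hpet and that one page covers the register block (offset 0 is the general capabilities/ID register in the HPET spec):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	volatile uint64_t *regs;
	int fd = open("/dev/hpet", O_RDONLY);

	if (fd < 0) {
		perror("/dev/hpet");
		return 1;
	}
	regs = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
	if (regs == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("HPET capabilities/ID: %#llx\n",
	       (unsigned long long)regs[0]);
	munmap((void *)regs, getpagesize());
	close(fd);
	return 0;
}
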
+config MAX_RAW_DEVS
+ int "Maximum number of RAW devices to support (1-8192)"
+ depends on RAW_DRIVER
+ default "256"
+ help
+ The maximum number of RAW devices that are supported.
+ Default is 256. Increase this number in case you need lots of
+ raw devices.
+
+config HANGCHECK_TIMER
+ tristate "Hangcheck timer"
+ depends on X86_64 || X86
+ help
+ The hangcheck-timer module detects when the system has gone
+ out to lunch past a certain margin. It can reboot the system
+ or merely print a warning.
+
+config MMTIMER
+ tristate "MMTIMER Memory mapped RTC for SGI Altix"
+ depends on IA64_GENERIC || IA64_SGI_SN2
+ default y
+ help
+ The mmtimer device allows direct userspace access to the
+ Altix system timer.
+
+source "drivers/char/tpm/Kconfig"
+
+endmenu
+
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
new file mode 100644
index 00000000000..54ed76af1a4
--- /dev/null
+++ b/drivers/char/Makefile
@@ -0,0 +1,118 @@
+#
+# Makefile for the kernel character device drivers.
+#
+
+#
+# This file contains the font map for the default (hardware) font
+#
+FONTMAPFILE = cp437.uni
+
+obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o
+
+obj-$(CONFIG_LEGACY_PTYS) += pty.o
+obj-$(CONFIG_UNIX98_PTYS) += pty.o
+obj-y += misc.o
+obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o consolemap.o \
+ consolemap_deftbl.o selection.o keyboard.o
+obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
+obj-$(CONFIG_ESPSERIAL) += esp.o
+obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
+obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
+obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
+obj-$(CONFIG_ROCKETPORT) += rocket.o
+obj-$(CONFIG_SERIAL167) += serial167.o
+obj-$(CONFIG_CYCLADES) += cyclades.o
+obj-$(CONFIG_STALLION) += stallion.o
+obj-$(CONFIG_ISTALLION) += istallion.o
+obj-$(CONFIG_DIGIEPCA) += epca.o
+obj-$(CONFIG_SPECIALIX) += specialix.o
+obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
+obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o
+obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
+obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
+obj-$(CONFIG_COMPUTONE) += ip2.o ip2main.o
+obj-$(CONFIG_RISCOM8) += riscom8.o
+obj-$(CONFIG_ISI) += isicom.o
+obj-$(CONFIG_SYNCLINK) += synclink.o
+obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
+obj-$(CONFIG_N_HDLC) += n_hdlc.o
+obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
+obj-$(CONFIG_SX) += sx.o generic_serial.o
+obj-$(CONFIG_RIO) += rio/ generic_serial.o
+obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o hvsi.o
+obj-$(CONFIG_RAW_DRIVER) += raw.o
+obj-$(CONFIG_SGI_SNSC) += snsc.o
+obj-$(CONFIG_MMTIMER) += mmtimer.o
+obj-$(CONFIG_VIOCONS) += viocons.o
+obj-$(CONFIG_VIOTAPE) += viotape.o
+obj-$(CONFIG_HVCS) += hvcs.o
+
+obj-$(CONFIG_PRINTER) += lp.o
+obj-$(CONFIG_TIPAR) += tipar.o
+
+obj-$(CONFIG_DTLK) += dtlk.o
+obj-$(CONFIG_R3964) += n_r3964.o
+obj-$(CONFIG_APPLICOM) += applicom.o
+obj-$(CONFIG_SONYPI) += sonypi.o
+obj-$(CONFIG_RTC) += rtc.o
+obj-$(CONFIG_HPET) += hpet.o
+obj-$(CONFIG_GEN_RTC) += genrtc.o
+obj-$(CONFIG_EFI_RTC) += efirtc.o
+obj-$(CONFIG_SGI_DS1286) += ds1286.o
+obj-$(CONFIG_SGI_IP27_RTC) += ip27-rtc.o
+obj-$(CONFIG_DS1302) += ds1302.o
+obj-$(CONFIG_S3C2410_RTC) += s3c2410-rtc.o
+obj-$(CONFIG_RTC_VR41XX) += vr41xx_rtc.o
+ifeq ($(CONFIG_GENERIC_NVRAM),y)
+ obj-$(CONFIG_NVRAM) += generic_nvram.o
+else
+ obj-$(CONFIG_NVRAM) += nvram.o
+endif
+obj-$(CONFIG_TOSHIBA) += toshiba.o
+obj-$(CONFIG_I8K) += i8k.o
+obj-$(CONFIG_DS1620) += ds1620.o
+obj-$(CONFIG_HW_RANDOM) += hw_random.o
+obj-$(CONFIG_FTAPE) += ftape/
+obj-$(CONFIG_COBALT_LCD) += lcd.o
+obj-$(CONFIG_PPDEV) += ppdev.o
+obj-$(CONFIG_NWBUTTON) += nwbutton.o
+obj-$(CONFIG_NWFLASH) += nwflash.o
+obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+obj-$(CONFIG_TANBAC_TB0219) += tb0219.o
+
+obj-$(CONFIG_WATCHDOG) += watchdog/
+obj-$(CONFIG_MWAVE) += mwave/
+obj-$(CONFIG_AGP) += agp/
+obj-$(CONFIG_DRM) += drm/
+obj-$(CONFIG_PCMCIA) += pcmcia/
+obj-$(CONFIG_IPMI_HANDLER) += ipmi/
+
+obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
+obj-$(CONFIG_TCG_TPM) += tpm/
+# Files generated that shall be removed upon make clean
+clean-files := consolemap_deftbl.c defkeymap.c qtronixmap.c
+
+quiet_cmd_conmk = CONMK $@
+ cmd_conmk = scripts/conmakehash $< > $@
+
+$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
+ $(call cmd,conmk)
+
+$(obj)/defkeymap.o: $(obj)/defkeymap.c
+
+$(obj)/qtronixmap.o: $(obj)/qtronixmap.c
+
+# Uncomment if you're changing the keymap and have an appropriate
+# loadkeys version for the map. By default, we'll use the shipped
+# versions.
+# GENERATE_KEYMAP := 1
+
+ifdef GENERATE_KEYMAP
+
+$(obj)/defkeymap.c $(obj)/qtronixmap.c: $(obj)/%.c: $(src)/%.map
+ loadkeys --mktable $< > $@.tmp
+ sed -e 's/^static *//' $@.tmp > $@
+ rm $@.tmp
+
+endif
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
new file mode 100644
index 00000000000..7f8c1b53b75
--- /dev/null
+++ b/drivers/char/agp/Kconfig
@@ -0,0 +1,171 @@
+config AGP
+ tristate "/dev/agpgart (AGP Support)" if !GART_IOMMU
+ depends on ALPHA || IA64 || PPC || X86
+ default y if GART_IOMMU
+ ---help---
+ AGP (Accelerated Graphics Port) is a bus system mainly used to
+ connect graphics cards to the rest of the system.
+
+ If you have an AGP system and you say Y here, it will be possible to
+ use the AGP features of your 3D rendering video card. This code acts
+ as a sort of "AGP driver" for the motherboard's chipset.
+
+ If you need more texture memory than you can get with the AGP GART
+ (theoretically up to 256 MB, but in practice usually 64 or 128 MB
+ due to kernel allocation issues), you could use PCI accesses
+ and have up to a couple gigs of texture space.
+
+ Note that this is the only means to have XFree86 4.x/GLX use
+ write-combining with MTRR support on the AGP bus. Without it, OpenGL
+ direct rendering will be a lot slower but still faster than PIO.
+
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called agpgart.
+
+config AGP_ALI
+ tristate "ALI chipset support"
+ depends on AGP && X86 && !X86_64
+ ---help---
+ This option gives you AGP support for the GLX component of
+ XFree86 4.x on the following ALi chipsets. The supported chipsets
+ include M1541, M1621, M1631, M1632, M1641, M1647, and M1651.
+ For the ALi-chipset question, ALi suggests you refer to
+ <http://www.ali.com.tw/eng/support/index.shtml>.
+
+ The M1541 chipset can do AGP 1x and 2x, but note that there is an
+ acknowledged incompatibility with Matrox G200 cards. Due to
+ timing issues, this chipset cannot do AGP 2x with the G200.
+ This is a hardware limitation. AGP 1x seems to be fine, though.
+
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI. If unsure, say N.
+
+config AGP_ATI
+ tristate "ATI chipset support"
+ depends on AGP && X86 && !X86_64
+ ---help---
+ This option gives you AGP support for the GLX component of
+ XFree86 4.x on the ATI RadeonIGP family of chipsets.
+
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI. If unsure, say N.
+
+config AGP_AMD
+ tristate "AMD Irongate, 761, and 762 chipset support"
+ depends on AGP && X86 && !X86_64
+ help
+ This option gives you AGP support for the GLX component of
+ XFree86 4.x on AMD Irongate, 761, and 762 chipsets.
+
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI. If unsure, say N.
+
+config AGP_AMD64
+ tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
+ depends on AGP && X86
+ default y if GART_IOMMU
+ help
+ This option gives you AGP support for the GLX component of
+ XFree86 4.x using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
+ You still need an external AGP bridge like the AMD 8151, VIA
+ K8T400M, SiS755. It may also support other AGP bridges when loaded
+ with agp_try_unsupported=1.
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI. If unsure, say Y.
+
+config AGP_INTEL
+ tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
+ depends on AGP && X86
+ help
+ This option gives you AGP support for the GLX component of XFree86 4.x
+ on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
+ E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G,
+ 852GM, 855GM, 865G and I915 integrated graphics chipsets.
+
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI, or if you have any Intel integrated graphics
+ chipsets. If unsure, say Y.
+
+config AGP_NVIDIA
+ tristate "NVIDIA nForce/nForce2 chipset support"
+ depends on AGP && X86 && !X86_64
+ help
+ This option gives you AGP support for the GLX component of
+ XFree86 4.x on the following NVIDIA chipsets. The supported chipsets
+ include nForce and nForce2.
+
+config AGP_SIS
+ tristate "SiS chipset support"
+ depends on AGP && X86 && !X86_64
+ help
+ This option gives you AGP support for the GLX component of
+ XFree86 4.x on Silicon Integrated Systems [SiS] chipsets.
+
+ Note that 5591/5592 AGP chipsets are NOT supported.
+
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI. If unsure, say N.
+
+config AGP_SWORKS
+ tristate "Serverworks LE/HE chipset support"
+ depends on AGP && X86 && !X86_64
+ help
+ Say Y here to support the Serverworks AGP card. See
+ <http://www.serverworks.com/> for product descriptions and images.
+
+config AGP_VIA
+ tristate "VIA chipset support"
+ depends on AGP && X86 && !X86_64
+ help
+ This option gives you AGP support for the GLX component of
+ XFree86 4.x on VIA MVP3/Apollo Pro chipsets.
+
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI. If unsure, say N.
+
+config AGP_I460
+ tristate "Intel 460GX chipset support"
+ depends on AGP && (IA64_DIG || IA64_GENERIC)
+ help
+ This option gives you AGP GART support for the Intel 460GX chipset
+ for IA64 processors.
+
+config AGP_HP_ZX1
+ tristate "HP ZX1 chipset AGP support"
+ depends on AGP && (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+ help
+ This option gives you AGP GART support for the HP ZX1 chipset
+ for IA64 processors.
+
+config AGP_ALPHA_CORE
+ tristate "Alpha AGP support"
+ depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
+ default AGP
+
+config AGP_UNINORTH
+ tristate "Apple UniNorth & U3 AGP support"
+ depends on AGP && PPC_PMAC
+ help
+ This option gives you AGP support for Apple machines with a
+ UniNorth or U3 (Apple G5) bridge.
+
+config AGP_EFFICEON
+ tristate "Transmeta Efficeon support"
+ depends on AGP && X86 && !X86_64
+ help
+ This option gives you AGP support for the Transmeta Efficeon
+ series processors with integrated northbridges.
+
+ You should say Y here if you use XFree86 3.3.6 or 4.x and want to
+ use GLX or DRI. If unsure, say Y.
+
+config AGP_SGI_TIOCA
+ tristate "SGI TIO chipset AGP support"
+ depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
+ help
+ This option gives you AGP GART support for the SGI TIO chipset
+ for IA64 processors.
+
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
new file mode 100644
index 00000000000..d33a22f2fa0
--- /dev/null
+++ b/drivers/char/agp/Makefile
@@ -0,0 +1,18 @@
+agpgart-y := backend.o frontend.o generic.o isoch.o
+
+obj-$(CONFIG_AGP) += agpgart.o
+obj-$(CONFIG_AGP_ALI) += ali-agp.o
+obj-$(CONFIG_AGP_ATI) += ati-agp.o
+obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
+obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
+obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
+obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
+obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
+obj-$(CONFIG_AGP_I460) += i460-agp.o
+obj-$(CONFIG_AGP_INTEL) += intel-agp.o
+obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
+obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
+obj-$(CONFIG_AGP_SIS) += sis-agp.o
+obj-$(CONFIG_AGP_SWORKS) += sworks-agp.o
+obj-$(CONFIG_AGP_UNINORTH) += uninorth-agp.o
+obj-$(CONFIG_AGP_VIA) += via-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
new file mode 100644
index 00000000000..ad9c11391d8
--- /dev/null
+++ b/drivers/char/agp/agp.h
@@ -0,0 +1,331 @@
+/*
+ * AGPGART
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2004 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_PRIV_H
+#define _AGP_BACKEND_PRIV_H 1
+
+#include <asm/agp.h> /* for flush_agp_cache() */
+
+#define PFX "agpgart: "
+
+//#define AGP_DEBUG 1
+#ifdef AGP_DEBUG
+#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __FUNCTION__ , ## y)
+#else
+#define DBG(x,y...) do { } while (0)
+#endif
+
+extern struct agp_bridge_data *agp_bridge;
+
+enum aper_size_type {
+ U8_APER_SIZE,
+ U16_APER_SIZE,
+ U32_APER_SIZE,
+ LVL2_APER_SIZE,
+ FIXED_APER_SIZE
+};
+
+struct gatt_mask {
+ unsigned long mask;
+ u32 type;
+ /* totally device specific, for integrated chipsets that
+ * might have different types of memory masks. For other
+ * devices this will probably be ignored */
+};
+
+struct aper_size_info_8 {
+ int size;
+ int num_entries;
+ int page_order;
+ u8 size_value;
+};
+
+struct aper_size_info_16 {
+ int size;
+ int num_entries;
+ int page_order;
+ u16 size_value;
+};
+
+struct aper_size_info_32 {
+ int size;
+ int num_entries;
+ int page_order;
+ u32 size_value;
+};
+
+struct aper_size_info_lvl2 {
+ int size;
+ int num_entries;
+ u32 size_value;
+};
+
+struct aper_size_info_fixed {
+ int size;
+ int num_entries;
+ int page_order;
+};
+
+struct agp_bridge_driver {
+ struct module *owner;
+ void *aperture_sizes;
+ int num_aperture_sizes;
+ enum aper_size_type size_type;
+ int cant_use_aperture;
+ int needs_scratch_page;
+ struct gatt_mask *masks;
+ int (*fetch_size)(void);
+ int (*configure)(void);
+ void (*agp_enable)(struct agp_bridge_data *, u32);
+ void (*cleanup)(void);
+ void (*tlb_flush)(struct agp_memory *);
+ unsigned long (*mask_memory)(struct agp_bridge_data *,
+ unsigned long, int);
+ void (*cache_flush)(void);
+ int (*create_gatt_table)(struct agp_bridge_data *);
+ int (*free_gatt_table)(struct agp_bridge_data *);
+ int (*insert_memory)(struct agp_memory *, off_t, int);
+ int (*remove_memory)(struct agp_memory *, off_t, int);
+ struct agp_memory *(*alloc_by_type) (size_t, int);
+ void (*free_by_type)(struct agp_memory *);
+ void *(*agp_alloc_page)(struct agp_bridge_data *);
+ void (*agp_destroy_page)(void *);
+};
+
+struct agp_bridge_data {
+ struct agp_version *version;
+ struct agp_bridge_driver *driver;
+ struct vm_operations_struct *vm_ops;
+ void *previous_size;
+ void *current_size;
+ void *dev_private_data;
+ struct pci_dev *dev;
+ u32 __iomem *gatt_table;
+ u32 *gatt_table_real;
+ unsigned long scratch_page;
+ unsigned long scratch_page_real;
+ unsigned long gart_bus_addr;
+ unsigned long gatt_bus_addr;
+ u32 mode;
+ enum chipset_type type;
+ unsigned long *key_list;
+ atomic_t current_memory_agp;
+ atomic_t agp_in_use;
+ int max_memory_agp; /* in number of pages */
+ int aperture_size_idx;
+ int capndx;
+ int flags;
+ char major_version;
+ char minor_version;
+ struct list_head list;
+};
+
+#define KB(x) ((x) * 1024)
+#define MB(x) (KB (KB (x)))
+#define GB(x) (MB (KB (x)))
+
+#define A_SIZE_8(x) ((struct aper_size_info_8 *) x)
+#define A_SIZE_16(x) ((struct aper_size_info_16 *) x)
+#define A_SIZE_32(x) ((struct aper_size_info_32 *) x)
+#define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x)
+#define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x)
+#define A_IDX8(bridge) (A_SIZE_8((bridge)->driver->aperture_sizes) + i)
+#define A_IDX16(bridge) (A_SIZE_16((bridge)->driver->aperture_sizes) + i)
+#define A_IDX32(bridge) (A_SIZE_32((bridge)->driver->aperture_sizes) + i)
+#define MAXKEY (4096 * 32)
+
+#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
+
+
+/* Intel registers */
+#define INTEL_APSIZE 0xb4
+#define INTEL_ATTBASE 0xb8
+#define INTEL_AGPCTRL 0xb0
+#define INTEL_NBXCFG 0x50
+#define INTEL_ERRSTS 0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_ENABLED 0x4
+#define I830_GMCH_MEM_MASK 0x1
+#define I830_GMCH_MEM_64M 0x1
+#define I830_GMCH_MEM_128M 0
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+#define I830_RDRAM_CHANNEL_TYPE 0x03010
+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
+
+/* This one is for I830MP w. an external graphic card */
+#define INTEL_I830_ERRSTS 0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I85X_CAPID 0x44
+#define I85X_VARIANT_MASK 0x7
+#define I85X_VARIANT_SHIFT 5
+#define I855_GME 0x0
+#define I855_GM 0x4
+#define I852_GME 0x2
+#define I852_GM 0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM 0x51
+#define INTEL_I845_ERRSTS 0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG 0x50
+#define INTEL_I860_ERRSTS 0xc8
+
+/* Intel i810 registers */
+#define I810_GMADDR 0x10
+#define I810_MMADDR 0x14
+#define I810_PTE_BASE 0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL 0x00000002
+#define I810_PTE_VALID 0x00000001
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+#define I810_GMS 0x000000c0
+#define I810_GMS_DISABLE 0x00000000
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+#define I810_DRAM_CTL 0x3000
+#define I810_DRAM_ROW_0 0x00000001
+#define I810_DRAM_ROW_0_SDRAM 0x00000001
+
+struct agp_device_ids {
+ unsigned short device_id; /* first, to make table easier to read */
+ enum chipset_type chipset;
+ const char *chipset_name;
+ int (*chipset_setup) (struct pci_dev *pdev); /* used to override generic */
+};
+
+/* Driver registration */
+struct agp_bridge_data *agp_alloc_bridge(void);
+void agp_put_bridge(struct agp_bridge_data *bridge);
+int agp_add_bridge(struct agp_bridge_data *bridge);
+void agp_remove_bridge(struct agp_bridge_data *bridge);
+
+/* Frontend routines. */
+int agp_frontend_initialize(void);
+void agp_frontend_cleanup(void);
+
+/* Generic routines. */
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
+struct agp_memory *agp_create_memory(int scratch_pages);
+int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
+void agp_generic_free_by_type(struct agp_memory *curr);
+void *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+void agp_generic_destroy_page(void *addr);
+void agp_free_key(int key);
+int agp_num_entries(void);
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
+void agp_device_command(u32 command, int agp_v3);
+int agp_3_5_enable(struct agp_bridge_data *bridge);
+void global_cache_flush(void);
+void get_agp_version(struct agp_bridge_data *bridge);
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+ unsigned long addr, int type);
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
+
+/* generic routines for agp>=3 */
+int agp3_generic_fetch_size(void);
+void agp3_generic_tlbflush(struct agp_memory *mem);
+int agp3_generic_configure(void);
+void agp3_generic_cleanup(void);
+
+/* aperture sizes have been standardised since v3 */
+#define AGP_GENERIC_SIZES_ENTRIES 11
+extern struct aper_size_info_16 agp3_generic_sizes[];
+
+
+extern int agp_off;
+extern int agp_try_unsupported_boot;
+
+/* Chipset independent registers (from AGP Spec) */
+#define AGP_APBASE 0x10
+
+#define AGPSTAT 0x4
+#define AGPCMD 0x8
+#define AGPNISTAT 0xc
+#define AGPCTRL 0x10
+#define AGPAPSIZE 0x14
+#define AGPNEPG 0x16
+#define AGPGARTLO 0x18
+#define AGPGARTHI 0x1c
+#define AGPNICMD 0x20
+
+#define AGP_MAJOR_VERSION_SHIFT (20)
+#define AGP_MINOR_VERSION_SHIFT (16)
+
+#define AGPSTAT_RQ_DEPTH (0xff000000)
+#define AGPSTAT_RQ_DEPTH_SHIFT 24
+
+#define AGPSTAT_CAL_MASK (1<<12|1<<11|1<<10)
+#define AGPSTAT_ARQSZ (1<<15|1<<14|1<<13)
+#define AGPSTAT_ARQSZ_SHIFT 13
+
+#define AGPSTAT_SBA (1<<9)
+#define AGPSTAT_AGP_ENABLE (1<<8)
+#define AGPSTAT_FW (1<<4)
+#define AGPSTAT_MODE_3_0 (1<<3)
+
+#define AGPSTAT2_1X (1<<0)
+#define AGPSTAT2_2X (1<<1)
+#define AGPSTAT2_4X (1<<2)
+
+#define AGPSTAT3_RSVD (1<<2)
+#define AGPSTAT3_8X (1<<1)
+#define AGPSTAT3_4X (1)
+
+#define AGPCTRL_APERENB (1<<8)
+#define AGPCTRL_GTLBEN (1<<7)
+
+#define AGP2_RESERVED_MASK 0x00fffcc8
+#define AGP3_RESERVED_MASK 0x00ff00c4
+
+#define AGP_ERRATA_FASTWRITES 1<<0
+#define AGP_ERRATA_SBA 1<<1
+#define AGP_ERRATA_1X 1<<2
+
+#endif /* _AGP_BACKEND_PRIV_H */
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
new file mode 100644
index 00000000000..c86a22c5499
--- /dev/null
+++ b/drivers/char/agp/ali-agp.c
@@ -0,0 +1,414 @@
+/*
+ * ALi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+#define ALI_AGPCTRL 0xb8
+#define ALI_ATTBASE 0xbc
+#define ALI_TLBCTRL 0xc0
+#define ALI_TAGCTRL 0xc4
+#define ALI_CACHE_FLUSH_CTRL 0xD0
+#define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
+#define ALI_CACHE_FLUSH_EN 0x100
+
+static int ali_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_32 *values;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ temp &= ~(0xfffffff0);
+ values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void ali_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ temp &= 0xfffffff0;
+ temp |= (1<<0 | 1<<1);
+ pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp);
+}
+
+static void ali_cleanup(void)
+{
+ struct aper_size_info_32 *previous_size;
+ u32 temp;
+
+ previous_size = A_SIZE_32(agp_bridge->previous_size);
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+// clear tag
+ pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
+ ((temp & 0xffffff00) | 0x00000001|0x00000002));
+
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
+ ((temp & 0x00000ff0) | previous_size->size_value));
+}
+
+static int ali_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_32 *current_size;
+
+ current_size = A_SIZE_32(agp_bridge->current_size);
+
+ /* aperture size and gatt addr */
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
+ | (current_size->size_value & 0xf));
+ pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
+
+ /* tlb control */
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+#if 0
+ if (agp_bridge->type == ALI_M1541) {
+ u32 nlvm_addr = 0;
+
+ switch (current_size->size_value) {
+ case 0: break;
+ case 1: nlvm_addr = 0x100000;break;
+ case 2: nlvm_addr = 0x200000;break;
+ case 3: nlvm_addr = 0x400000;break;
+ case 4: nlvm_addr = 0x800000;break;
+ case 6: nlvm_addr = 0x1000000;break;
+ case 7: nlvm_addr = 0x2000000;break;
+ case 8: nlvm_addr = 0x4000000;break;
+ case 9: nlvm_addr = 0x8000000;break;
+ case 10: nlvm_addr = 0x10000000;break;
+ default: break;
+ }
+ nlvm_addr--;
+ nlvm_addr&=0xfff00000;
+
+ nlvm_addr+= agp_bridge->gart_bus_addr;
+ nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
+ printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr);
+ }
+#endif
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ temp &= 0xffffff7f; //enable TLB
+ pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp);
+
+ return 0;
+}
+
+
+static void m1541_cache_flush(void)
+{
+ int i, page_count;
+ u32 temp;
+
+ global_cache_flush();
+
+ page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
+ for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ (agp_bridge->gatt_bus_addr + i)) |
+ ALI_CACHE_FLUSH_EN));
+ }
+}
+
+static void *m1541_alloc_page(struct agp_bridge_data *bridge)
+{
+ void *addr = agp_generic_alloc_page(agp_bridge);
+ u32 temp;
+
+ if (!addr)
+ return NULL;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN ));
+ return addr;
+}
+
+static void ali_destroy_page(void * addr)
+{
+ if (addr) {
+ global_cache_flush(); /* is this really needed? --hch */
+ agp_generic_destroy_page(addr);
+ }
+}
+
+static void m1541_destroy_page(void * addr)
+{
+ u32 temp;
+
+ if (addr == NULL)
+ return;
+
+ global_cache_flush();
+
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN));
+ agp_generic_destroy_page(addr);
+}
+
+
+/* Setup function */
+
+static struct aper_size_info_32 ali_generic_sizes[7] =
+{
+ {256, 65536, 6, 10},
+ {128, 32768, 5, 9},
+ {64, 16384, 4, 8},
+ {32, 8192, 3, 7},
+ {16, 4096, 2, 6},
+ {8, 2048, 1, 4},
+ {4, 1024, 0, 3}
+};
+
+struct agp_bridge_driver ali_generic_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ali_generic_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = ali_configure,
+ .fetch_size = ali_fetch_size,
+ .cleanup = ali_cleanup,
+ .tlb_flush = ali_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = ali_destroy_page,
+};
+
+struct agp_bridge_driver ali_m1541_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ali_generic_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = ali_configure,
+ .fetch_size = ali_fetch_size,
+ .cleanup = ali_cleanup,
+ .tlb_flush = ali_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = m1541_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = m1541_alloc_page,
+ .agp_destroy_page = m1541_destroy_page,
+};
+
+
+static struct agp_device_ids ali_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1541,
+ .chipset_name = "M1541",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1621,
+ .chipset_name = "M1621",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1631,
+ .chipset_name = "M1631",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1632,
+ .chipset_name = "M1632",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1641,
+ .chipset_name = "M1641",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1644,
+ .chipset_name = "M1644",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1647,
+ .chipset_name = "M1647",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1651,
+ .chipset_name = "M1651",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1671,
+ .chipset_name = "M1671",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1681,
+ .chipset_name = "M1681",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1683,
+ .chipset_name = "M1683",
+ },
+
+ { }, /* dummy final entry, always present */
+};
+
+static int __devinit agp_ali_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = ali_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ u8 hidden_1621_id, cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name; j++) {
+ if (pdev->device == devs[j].device_id)
+ goto found;
+ }
+
+ printk(KERN_ERR PFX "Unsupported ALi chipset (device id: %04x)\n",
+ pdev->device);
+ return -ENODEV;
+
+
+found:
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_AL_M1541:
+ bridge->driver = &ali_m1541_bridge;
+ break;
+ case PCI_DEVICE_ID_AL_M1621:
+ pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
+ switch (hidden_1621_id) {
+ case 0x31:
+ devs[j].chipset_name = "M1631";
+ break;
+ case 0x32:
+ devs[j].chipset_name = "M1632";
+ break;
+ case 0x41:
+ devs[j].chipset_name = "M1641";
+ break;
+ case 0x43:
+ devs[j].chipset_name = "M????";
+ break;
+ case 0x47:
+ devs[j].chipset_name = "M1647";
+ break;
+ case 0x51:
+ devs[j].chipset_name = "M1651";
+ break;
+ default:
+ break;
+ }
+ /*FALLTHROUGH*/
+ default:
+ bridge->driver = &ali_generic_bridge;
+ }
+
+ printk(KERN_INFO PFX "Detected ALi %s chipset\n",
+ devs[j].chipset_name);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_ali_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_ali_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
+
+static struct pci_driver agp_ali_pci_driver = {
+ .name = "agpgart-ali",
+ .id_table = agp_ali_pci_table,
+ .probe = agp_ali_probe,
+ .remove = agp_ali_remove,
+};
+
+static int __init agp_ali_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_ali_pci_driver);
+}
+
+static void __exit agp_ali_cleanup(void)
+{
+ pci_unregister_driver(&agp_ali_pci_driver);
+}
+
+module_init(agp_ali_init);
+module_exit(agp_ali_cleanup);
+
+MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
new file mode 100644
index 00000000000..a072d32005a
--- /dev/null
+++ b/drivers/char/agp/alpha-agp.c
@@ -0,0 +1,216 @@
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/machvec.h>
+#include <asm/agp_backend.h>
+#include "../../../arch/alpha/kernel/pci_impl.h"
+
+#include "agp.h"
+
+static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ dma_addr_t dma_addr;
+ unsigned long pa;
+ struct page *page;
+
+ dma_addr = address - vma->vm_start + agp->aperture.bus_base;
+ pa = agp->ops->translate(agp, dma_addr);
+
+ if (pa == (unsigned long)-EINVAL) return NULL; /* no translation */
+
+ /*
+ * Get the page, inc the use count, and return it
+ */
+ page = virt_to_page(__va(pa));
+ get_page(page);
+ if (type)
+ *type = VM_FAULT_MINOR;
+ return page;
+}
+
+static struct aper_size_info_fixed alpha_core_agp_sizes[] =
+{
+ { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
+};
+
+struct vm_operations_struct alpha_core_agp_vm_ops = {
+ .nopage = alpha_core_agp_vm_nopage,
+};
+
+
+static int alpha_core_agp_nop(void)
+{
+ /* just return success */
+ return 0;
+}
+
+static int alpha_core_agp_fetch_size(void)
+{
+ return alpha_core_agp_sizes[0].size;
+}
+
+static int alpha_core_agp_configure(void)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ agp_bridge->gart_bus_addr = agp->aperture.bus_base;
+ return 0;
+}
+
+static void alpha_core_agp_cleanup(void)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+
+ agp->ops->cleanup(agp);
+}
+
+static void alpha_core_agp_tlbflush(struct agp_memory *mem)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
+}
+
+static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ alpha_agp_info *agp = bridge->dev_private_data;
+
+ agp->mode.lw = agp_collect_device_status(bridge, mode,
+ agp->capability.lw);
+
+ agp->mode.bits.enable = 1;
+ agp->ops->configure(agp);
+
+ agp_device_command(agp->mode.lw, 0);
+}
+
+static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ int num_entries, status;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ if ((pg_start + mem->page_count) > num_entries) return -EINVAL;
+
+ status = agp->ops->bind(agp, pg_start, mem);
+ mb();
+ alpha_core_agp_tlbflush(mem);
+
+ return status;
+}
+
+static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ int status;
+
+ status = agp->ops->unbind(agp, pg_start, mem);
+ alpha_core_agp_tlbflush(mem);
+ return status;
+}
+
+struct agp_bridge_driver alpha_core_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = alpha_core_agp_sizes,
+ .num_aperture_sizes = 1,
+ .size_type = FIXED_APER_SIZE,
+ .cant_use_aperture = 1,
+ .masks = NULL,
+
+ .fetch_size = alpha_core_agp_fetch_size,
+ .configure = alpha_core_agp_configure,
+ .agp_enable = alpha_core_agp_enable,
+ .cleanup = alpha_core_agp_cleanup,
+ .tlb_flush = alpha_core_agp_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = alpha_core_agp_nop,
+ .free_gatt_table = alpha_core_agp_nop,
+ .insert_memory = alpha_core_agp_insert_memory,
+ .remove_memory = alpha_core_agp_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+struct agp_bridge_data *alpha_bridge;
+
+int __init
+alpha_core_agp_setup(void)
+{
+ alpha_agp_info *agp = alpha_mv.agp_info();
+ struct pci_dev *pdev; /* faked */
+ struct aper_size_info_fixed *aper_size;
+
+ if (!agp)
+ return -ENODEV;
+ if (agp->ops->setup(agp))
+ return -ENODEV;
+
+ /*
+ * Build the aperture size descriptor
+ */
+ aper_size = alpha_core_agp_sizes;
+ aper_size->size = agp->aperture.size / (1024 * 1024);
+ aper_size->num_entries = agp->aperture.size / PAGE_SIZE;
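+ /* Assuming a power-of-two aperture, __ffs() below yields log2 of the
+  * entry count in 1024-entry units. */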
+ aper_size->page_order = __ffs(aper_size->num_entries / 1024);
+
+ /*
+ * Build a fake pci_dev struct
+ */
+ pdev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+ pdev->vendor = 0xffff;
+ pdev->device = 0xffff;
+ pdev->sysdata = agp->hose;
+
+ alpha_bridge = agp_alloc_bridge();
+ if (!alpha_bridge)
+ goto fail;
+
+ alpha_bridge->driver = &alpha_core_agp_driver;
+ alpha_bridge->vm_ops = &alpha_core_agp_vm_ops;
+ alpha_bridge->current_size = aper_size; /* only 1 size */
+ alpha_bridge->dev_private_data = agp;
+ alpha_bridge->dev = pdev;
+ alpha_bridge->mode = agp->capability.lw;
+
+ printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index);
+ return agp_add_bridge(alpha_bridge);
+
+ fail:
+ kfree(pdev);
+ return -ENOMEM;
+}
+
+static int __init agp_alpha_core_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ if (alpha_mv.agp_info)
+ return alpha_core_agp_setup();
+ return -ENODEV;
+}
+
+static void __exit agp_alpha_core_cleanup(void)
+{
+ agp_remove_bridge(alpha_bridge);
+ agp_put_bridge(alpha_bridge);
+}
+
+module_init(agp_alpha_core_init);
+module_exit(agp_alpha_core_cleanup);
+
+MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
new file mode 100644
index 00000000000..f1ea87ea6b6
--- /dev/null
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -0,0 +1,542 @@
+/*
+ * AMD K7 AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/gfp.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include "agp.h"
+
+#define AMD_MMBASE 0x14
+#define AMD_APSIZE 0xac
+#define AMD_MODECNTL 0xb0
+#define AMD_MODECNTL2 0xb2
+#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
+#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
+#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
+#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
+
+static struct pci_device_id agp_amdk7_pci_table[];
+
+struct amd_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+};
+
+static struct _amd_irongate_private {
+ volatile u8 __iomem *registers;
+ struct amd_page_map **gatt_pages;
+ int num_tables;
+} amd_irongate_private;
+
+static int amd_create_page_map(struct amd_page_map *page_map)
+{
+ int i;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL)
+ return -ENOMEM;
+
+ SetPageReserved(virt_to_page(page_map->real));
+ global_cache_flush();
+ page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+ PAGE_SIZE);
+ if (page_map->remapped == NULL) {
+ ClearPageReserved(virt_to_page(page_map->real));
+ free_page((unsigned long) page_map->real);
+ page_map->real = NULL;
+ return -ENOMEM;
+ }
+ global_cache_flush();
+
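+ /* Point every entry at the scratch page so stray translations hit a
+  * harmless page. */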
+ for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+ readl(page_map->remapped+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+static void amd_free_page_map(struct amd_page_map *page_map)
+{
+ iounmap(page_map->remapped);
+ ClearPageReserved(virt_to_page(page_map->real));
+ free_page((unsigned long) page_map->real);
+}
+
+static void amd_free_gatt_pages(void)
+{
+ int i;
+ struct amd_page_map **tables;
+ struct amd_page_map *entry;
+
+ tables = amd_irongate_private.gatt_pages;
+ for (i = 0; i < amd_irongate_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL)
+ amd_free_page_map(entry);
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+ amd_irongate_private.gatt_pages = NULL;
+}
+
+static int amd_create_gatt_pages(int nr_tables)
+{
+ struct amd_page_map **tables;
+ struct amd_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kmalloc((nr_tables + 1) * sizeof(struct amd_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL)
+ return -ENOMEM;
+
+ memset (tables, 0, sizeof(struct amd_page_map *) * (nr_tables + 1));
+ for (i = 0; i < nr_tables; i++) {
+ entry = kmalloc(sizeof(struct amd_page_map), GFP_KERNEL);
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ memset (entry, 0, sizeof(struct amd_page_map));
+ tables[i] = entry;
+ retval = amd_create_page_map(entry);
+ if (retval != 0)
+ break;
+ }
+ amd_irongate_private.num_tables = nr_tables;
+ amd_irongate_private.gatt_pages = tables;
+
+ if (retval != 0)
+ amd_free_gatt_pages();
+
+ return retval;
+}
+
+/* Since we don't need contiguous memory we just try
+ * to get the gatt table once.
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
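+/* Two-level GATT: bits 31:22 of the bus address (relative to the aperture
+ * base) pick the page directory slot, bits 21:12 the entry within a page. */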
+
+static int amd_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ struct amd_page_map page_dir;
+ unsigned long addr;
+ int retval;
+ u32 temp;
+ int i;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = amd_create_page_map(&page_dir);
+ if (retval != 0)
+ return retval;
+
+ retval = amd_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ amd_free_page_map(&page_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+ /* Get the address for the gart region.
+ * This is a bus address even on the alpha, because it's
+ * used to program the agp master, not the cpu.
+ */
+
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = addr;
+
+ /* Calculate the agp offset */
+ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+ writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
+ page_dir.remapped+GET_PAGE_DIR_OFF(addr));
+ readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+static int amd_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct amd_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ amd_free_gatt_pages();
+ amd_free_page_map(&page_dir);
+ return 0;
+}
+
+static int amd_irongate_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_lvl2 *values;
+
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = (temp & 0x0000000e);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int amd_irongate_configure(void)
+{
+ struct aper_size_info_lvl2 *current_size;
+ u32 temp;
+ u16 enable_reg;
+
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
+ temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+
+ /* Write out the address of the gatt table */
+ writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
+ readl(amd_irongate_private.registers+AMD_ATTBASE); /* PCI Posting. */
+
+ /* Write the Sync register */
+ pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);
+
+ /* Set indexing mode */
+ pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);
+
+ /* Write the enable register */
+ enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
+ enable_reg = (enable_reg | 0x0004);
+ writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
+ readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
+
+ /* Write out the size register */
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
+ pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
+
+ /* Flush the tlb */
+ writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
+ readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting.*/
+ return 0;
+}
+
+static void amd_irongate_cleanup(void)
+{
+ struct aper_size_info_lvl2 *previous_size;
+ u32 temp;
+ u16 enable_reg;
+
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+
+ enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
+ enable_reg = (enable_reg & ~(0x0004));
+ writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
+ readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
+
+ /* Write back the previous size and disable gart translation */
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
+ iounmap((void __iomem *) amd_irongate_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT and flushing them individually. However,
+ * it currently just flushes the whole table, which is probably
+ * more efficient, since agp_memory blocks can span a large number
+ * of entries.
+ */
+
+static void amd_irongate_tlbflush(struct agp_memory *temp)
+{
+ writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
+ readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting. */
+}
+
+static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ global_cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_generic_mask_memory(agp_bridge,
+ mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+ amd_irongate_tlbflush(mem);
+ return 0;
+}
+
+static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
+ amd_irongate_tlbflush(mem);
+ return 0;
+}
+
+static struct aper_size_info_lvl2 amd_irongate_sizes[7] =
+{
+ {2048, 524288, 0x0000000c},
+ {1024, 262144, 0x0000000a},
+ {512, 131072, 0x00000008},
+ {256, 65536, 0x00000006},
+ {128, 32768, 0x00000004},
+ {64, 16384, 0x00000002},
+ {32, 8192, 0x00000000}
+};
+
+static struct gatt_mask amd_irongate_masks[] =
+{
+ {.mask = 1, .type = 0}
+};
+
+struct agp_bridge_driver amd_irongate_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = amd_irongate_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = amd_irongate_configure,
+ .fetch_size = amd_irongate_fetch_size,
+ .cleanup = amd_irongate_cleanup,
+ .tlb_flush = amd_irongate_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = amd_irongate_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = amd_create_gatt_table,
+ .free_gatt_table = amd_free_gatt_table,
+ .insert_memory = amd_insert_memory,
+ .remove_memory = amd_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006,
+ .chipset_name = "Irongate",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700E,
+ .chipset_name = "761",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700C,
+ .chipset_name = "760MP",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ j = ent - agp_amdk7_pci_table;
+ printk(KERN_INFO PFX "Detected AMD %s chipset\n",
+ amd_agp_device_ids[j].chipset_name);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &amd_irongate_driver;
+ bridge->dev_private_data = &amd_irongate_private;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* 751 Errata (22564_B-1.PDF)
+ erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
+ system controller may experience noise due to strong drive strengths
+ */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
+ u8 cap_ptr=0;
+ struct pci_dev *gfxcard=NULL;
+ while (!cap_ptr) {
+ gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
+ if (!gfxcard) {
+ printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
+ return -ENODEV;
+ }
+ cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
+ if (!cap_ptr) {
+ pci_dev_put(gfxcard);
+ continue;
+ }
+ }
+
+ /* With so many variants of NVidia cards, it's simpler just
+ to blacklist them all, and then whitelist them as needed
+ (if necessary at all). */
+ if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
+ agp_bridge->flags |= AGP_ERRATA_1X;
+ printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
+ }
+ pci_dev_put(gfxcard);
+ }
+
+ /* 761 Errata (23613_F.pdf)
+ * Revisions B0/B1 were a disaster.
+ * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
+ * erratum 45: Timing problem prevents fast writes -- Disable fast write.
+ * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
+ * With this lot disabled, we should prevent lockups. */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
+ u8 revision=0;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
+ if (revision == 0x10 || revision == 0x11) {
+ agp_bridge->flags = AGP_ERRATA_FASTWRITES;
+ agp_bridge->flags |= AGP_ERRATA_SBA;
+ agp_bridge->flags |= AGP_ERRATA_1X;
+ printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
+ }
+ }
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+/* must be the same order as name table above */
+static struct pci_device_id agp_amdk7_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_7006,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_700E,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_700C,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
+
+static struct pci_driver agp_amdk7_pci_driver = {
+ .name = "agpgart-amdk7",
+ .id_table = agp_amdk7_pci_table,
+ .probe = agp_amdk7_probe,
+ .remove = agp_amdk7_remove,
+};
+
+static int __init agp_amdk7_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_amdk7_pci_driver);
+}
+
+static void __exit agp_amdk7_cleanup(void)
+{
+ pci_unregister_driver(&agp_amdk7_pci_driver);
+}
+
+module_init(agp_amdk7_init);
+module_exit(agp_amdk7_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
new file mode 100644
index 00000000000..905f0629c44
--- /dev/null
+++ b/drivers/char/agp/amd64-agp.c
@@ -0,0 +1,761 @@
+/*
+ * Copyright 2001-2003 SuSE Labs.
+ * Distributed under the GNU public license, v2.
+ *
+ * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
+ * It also includes support for the AMD 8151 AGP bridge,
+ * although it doesn't actually do much, as all the real
+ * work is done in the northbridge(s).
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+/* Will need to be increased if AMD64 ever goes >8-way. */
+#define MAX_HAMMER_GARTS 8
+
+/* PTE bits. */
+#define GPTE_VALID 1
+#define GPTE_COHERENT 2
+
+/* Aperture control register bits. */
+#define GARTEN (1<<0)
+#define DISGARTCPU (1<<4)
+#define DISGARTIO (1<<5)
+
+/* GART cache control register bits. */
+#define INVGART (1<<0)
+#define GARTPTEERR (1<<1)
+
+/* K8 On-cpu GART registers */
+#define AMD64_GARTAPERTURECTL 0x90
+#define AMD64_GARTAPERTUREBASE 0x94
+#define AMD64_GARTTABLEBASE 0x98
+#define AMD64_GARTCACHECTL 0x9c
+#define AMD64_GARTEN (1<<0)
+
+/* NVIDIA K8 registers */
+#define NVIDIA_X86_64_0_APBASE 0x10
+#define NVIDIA_X86_64_1_APBASE1 0x50
+#define NVIDIA_X86_64_1_APLIMIT1 0x54
+#define NVIDIA_X86_64_1_APSIZE 0xa8
+#define NVIDIA_X86_64_1_APBASE2 0xd8
+#define NVIDIA_X86_64_1_APLIMIT2 0xdc
+
+/* ULi K8 registers */
+#define ULI_X86_64_BASE_ADDR 0x10
+#define ULI_X86_64_HTT_FEA_REG 0x50
+#define ULI_X86_64_ENU_SCR_REG 0x54
+
+static int nr_garts;
+static struct pci_dev * hammers[MAX_HAMMER_GARTS];
+
+static struct resource *aperture_resource;
+static int __initdata agp_try_unsupported;
+
+static int gart_iterator;
+#define for_each_nb() for (gart_iterator = 0; gart_iterator < nr_garts; gart_iterator++)
+
+static void flush_amd64_tlb(struct pci_dev *dev)
+{
+ u32 tmp;
+
+ pci_read_config_dword (dev, AMD64_GARTCACHECTL, &tmp);
+ tmp |= INVGART;
+ pci_write_config_dword (dev, AMD64_GARTCACHECTL, tmp);
+}
+
+static void amd64_tlbflush(struct agp_memory *temp)
+{
+ for_each_nb()
+ flush_amd64_tlb(hammers[gart_iterator]);
+}
+
+static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ long long tmp;
+ u32 pte;
+
+ num_entries = agp_num_entries();
+
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ /* Make sure we can fit the range in the gatt table. */
+ /* FIXME: could wrap */
+ if (((unsigned long)pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+
+ /* gatt table should be empty. */
+ while (j < (pg_start + mem->page_count)) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
+ return -EBUSY;
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ global_cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
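+ /* Build the GART PTE: address bits 31:12 stay in place, bits 39:32
+  * move down to PTE bits 11:4, plus the valid and coherent flags. */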
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ tmp = agp_bridge->driver->mask_memory(agp_bridge,
+ mem->memory[i], mem->type);
+
+ BUG_ON(tmp & 0xffffff0000000ffcULL);
+ pte = (tmp & 0x000000ff00000000ULL) >> 28;
+ pte |=(tmp & 0x00000000fffff000ULL);
+ pte |= GPTE_VALID | GPTE_COHERENT;
+
+ writel(pte, agp_bridge->gatt_table+j);
+ readl(agp_bridge->gatt_table+j); /* PCI Posting. */
+ }
+ amd64_tlbflush(mem);
+ return 0;
+}
+
+/*
+ * This hack alters the order element according
+ * to the size of a long. It sucks. I totally disown this, even
+ * though it does appear to work for the most part.
+ */
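+/* (sizeof(long)/8) evaluates to 1 on 64-bit kernels and 0 on 32-bit ones,
+ * so the page order below is bumped by one when longs are 8 bytes. */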
+static struct aper_size_info_32 amd64_aperture_sizes[7] =
+{
+ {32, 8192, 3+(sizeof(long)/8), 0 },
+ {64, 16384, 4+(sizeof(long)/8), 1<<1 },
+ {128, 32768, 5+(sizeof(long)/8), 1<<2 },
+ {256, 65536, 6+(sizeof(long)/8), 1<<1 | 1<<2 },
+ {512, 131072, 7+(sizeof(long)/8), 1<<3 },
+ {1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
+ {2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
+};
+
+
+/*
+ * Get the current Aperture size from the x86-64.
+ * Note that there may be multiple x86-64 northbridges, but we just
+ * return the value from the first one we find. The set_size
+ * functions keep the rest coherent anyway, or at least should.
+ */
+static int amd64_fetch_size(void)
+{
+ struct pci_dev *dev;
+ int i;
+ u32 temp;
+ struct aper_size_info_32 *values;
+
+ dev = hammers[0];
+ if (dev==NULL)
+ return 0;
+
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
+ temp = (temp & 0xe);
+ values = A_SIZE_32(amd64_aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+/*
+ * In a multiprocessor x86-64 system, this function gets
+ * called once for each CPU.
+ */
+static u64 amd64_configure (struct pci_dev *hammer, u64 gatt_table)
+{
+ u64 aperturebase;
+ u32 tmp;
+ u64 addr, aper_base;
+
+ /* Address to map to */
+ pci_read_config_dword (hammer, AMD64_GARTAPERTUREBASE, &tmp);
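+ /* The register holds the aperture base in 32MB units, hence the
+  * shift by 25. */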
+ aperturebase = tmp << 25;
+ aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* address of the mappings table */
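+ /* The table base register takes physical address bits 39:12 in
+  * register bits 31:4. */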
+ addr = (u64) gatt_table;
+ addr >>= 12;
+ tmp = (u32) addr<<4;
+ tmp &= ~0xf;
+ pci_write_config_dword (hammer, AMD64_GARTTABLEBASE, tmp);
+
+ /* Enable GART translation for this hammer. */
+ pci_read_config_dword(hammer, AMD64_GARTAPERTURECTL, &tmp);
+ tmp |= GARTEN;
+ tmp &= ~(DISGARTCPU | DISGARTIO);
+ pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp);
+
+ /* keep the CPUs coherent. */
+ flush_amd64_tlb (hammer);
+
+ return aper_base;
+}
+
+
+static struct aper_size_info_32 amd_8151_sizes[7] =
+{
+ {2048, 524288, 9, 0x00000000 }, /* 0 0 0 0 0 0 */
+ {1024, 262144, 8, 0x00000400 }, /* 1 0 0 0 0 0 */
+ {512, 131072, 7, 0x00000600 }, /* 1 1 0 0 0 0 */
+ {256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */
+ {128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */
+ {64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */
+ {32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */
+};
+
+static int amd_8151_configure(void)
+{
+ unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
+
+ /* Configure AGP regs in each x86-64 host bridge. */
+ for_each_nb() {
+ agp_bridge->gart_bus_addr =
+ amd64_configure(hammers[gart_iterator],gatt_bus);
+ }
+ return 0;
+}
+
+
+static void amd64_cleanup(void)
+{
+ u32 tmp;
+
+ for_each_nb() {
+ /* disable gart translation */
+ pci_read_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, &tmp);
+ tmp &= ~AMD64_GARTEN;
+ pci_write_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, tmp);
+ }
+}
+
+
+struct agp_bridge_driver amd_8151_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = amd_8151_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = amd_8151_configure,
+ .fetch_size = amd64_fetch_size,
+ .cleanup = amd64_cleanup,
+ .tlb_flush = amd64_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = amd64_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+/* Some basic sanity checks for the aperture. */
+static int __devinit aperture_valid(u64 aper, u32 size)
+{
+ u32 pfn, c;
+ if (aper == 0) {
+ printk(KERN_ERR PFX "No aperture\n");
+ return 0;
+ }
+ if (size < 32*1024*1024) {
+ printk(KERN_ERR PFX "Aperture too small (%d MB)\n", size>>20);
+ return 0;
+ }
+ if (aper + size > 0xffffffff) {
+ printk(KERN_ERR PFX "Aperture out of bounds\n");
+ return 0;
+ }
+ pfn = aper >> PAGE_SHIFT;
+ for (c = 0; c < size/PAGE_SIZE; c++) {
+ if (!pfn_valid(pfn + c))
+ break;
+ if (!PageReserved(pfn_to_page(pfn + c))) {
+ printk(KERN_ERR PFX "Aperture pointing to RAM\n");
+ return 0;
+ }
+ }
+
+ /* Request the aperture. This catches cases where someone else
+    already put a mapping in there - happens with some very broken BIOSes.
+
+    Maybe it would be better to use pci_assign_resource/pci_enable_device
+    instead of trusting the bridges? */
+ if (!aperture_resource &&
+ !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
+ printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * W*s-centric BIOSes sometimes only set up the aperture in the AGP
+ * bridge, not the northbridge. On AMD64 this is handled early
+ * in aperture.c, but when GART_IOMMU is not enabled or we run
+ * on a 32-bit kernel this needs to be redone.
+ * Unfortunately it is impossible to fix the aperture here because it's too late
+ * to allocate that much memory. But at least error out cleanly instead of
+ * crashing.
+ */
+static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
+ u16 cap)
+{
+ u32 aper_low, aper_hi;
+ u64 aper, nb_aper;
+ int order = 0;
+ u32 nb_order, nb_base;
+ u16 apsize;
+
+ pci_read_config_dword(nb, 0x90, &nb_order);
+ nb_order = (nb_order >> 1) & 7;
+ pci_read_config_dword(nb, 0x94, &nb_base);
+ nb_aper = nb_base << 25;
+ if (aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) {
+ return 0;
+ }
+
+ /* Northbridge seems to contain crap. Try the AGP bridge. */
+
+ pci_read_config_word(agp, cap+0x14, &apsize);
+ if (apsize == 0xffff)
+ return -1;
+
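+ /* Each set bit in the size field halves the aperture (7 bits set means
+  * 32MB), so the order falls out of the bit count below. */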
+ apsize &= 0xfff;
+ /* Some BIOS use weird encodings not in the AGPv3 table. */
+ if (apsize & 0xff)
+ apsize |= 0xf00;
+ order = 7 - hweight16(apsize);
+
+ pci_read_config_dword(agp, 0x10, &aper_low);
+ pci_read_config_dword(agp, 0x14, &aper_hi);
+ aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
+ printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order);
+ if (order < 0 || !aperture_valid(aper, (32*1024*1024)<<order))
+ return -1;
+
+ pci_write_config_dword(nb, 0x90, order << 1);
+ pci_write_config_dword(nb, 0x94, aper >> 25);
+
+ return 0;
+}
+
+static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
+{
+ struct pci_dev *loop_dev = NULL;
+ int i = 0;
+
+ /* cache pci_devs of northbridges. */
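+ /* Device ID 0x1103 is the K8 northbridge Miscellaneous Control function. */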
+ while ((loop_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, loop_dev))
+ != NULL) {
+ if (i == MAX_HAMMER_GARTS) {
+ printk(KERN_ERR PFX "Too many northbridges for AGP\n");
+ return -1;
+ }
+ if (fix_northbridge(loop_dev, pdev, cap_ptr) < 0) {
+ printk(KERN_ERR PFX "No usable aperture found.\n");
+#ifdef __x86_64__
+ /* should port this to i386 */
+ printk(KERN_ERR PFX "Consider rebooting with iommu=memaper=2 to get a good aperture.\n");
+#endif
+ return -1;
+ }
+ hammers[i++] = loop_dev;
+ }
+ nr_garts = i;
+ return i == 0 ? -1 : 0;
+}
+
+/* Handle AMD 8151 quirks */
+static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
+{
+ char *revstring;
+ u8 rev_id;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+ switch (rev_id) {
+ case 0x01: revstring="A0"; break;
+ case 0x02: revstring="A1"; break;
+ case 0x11: revstring="B0"; break;
+ case 0x12: revstring="B1"; break;
+ case 0x13: revstring="B2"; break;
+ case 0x14: revstring="B3"; break;
+ default: revstring="??"; break;
+ }
+
+ printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring);
+
+ /*
+ * Work around errata.
+ * Chips before the B2 stepping incorrectly report v3.5.
+ */
+ if (rev_id < 0x13) {
+ printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n");
+ bridge->major_version = 3;
+ bridge->minor_version = 0;
+ }
+}
+
+
+static struct aper_size_info_32 uli_sizes[7] =
+{
+ {256, 65536, 6, 10},
+ {128, 32768, 5, 9},
+ {64, 16384, 4, 8},
+ {32, 8192, 3, 7},
+ {16, 4096, 2, 6},
+ {8, 2048, 1, 4},
+ {4, 1024, 0, 3}
+};
+static int __devinit uli_agp_init(struct pci_dev *pdev)
+{
+ u32 httfea, baseaddr, enuscr;
+ struct pci_dev *dev1;
+ int i;
+ unsigned size = amd64_fetch_size();
+
+ printk(KERN_INFO "Setting up ULi AGP.\n");
+ dev1 = pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 0));
+ if (dev1 == NULL) {
+ printk(KERN_INFO PFX "Detected a ULi chipset, "
+ "but could not find the secondary device.\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
+ if (uli_sizes[i].size == size)
+ break;
+
+ if (i == ARRAY_SIZE(uli_sizes)) {
+ printk(KERN_INFO PFX "No ULi size found for %d\n", size);
+ return -ENODEV;
+ }
+
+ /* shadow x86-64 registers into ULi registers */
+ pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &httfea);
+
+ /* if x86-64 aperture base is beyond 4G, exit here */
+ if ((httfea & 0x7fff) >> (32 - 25))
+ return -ENODEV;
+
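+ /* Convert the 32MB-unit aperture base back to a byte address, as
+  * amd64_configure() does. */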
+ httfea = (httfea & 0x7fff) << 25;
+
+ pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
+ baseaddr &= ~PCI_BASE_ADDRESS_MEM_MASK;
+ baseaddr |= httfea;
+ pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
+
+ enuscr = httfea + (size * 1024 * 1024) - 1;
+ pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
+ pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
+ return 0;
+}
+
+
+static struct aper_size_info_32 nforce3_sizes[5] =
+{
+ {512, 131072, 7, 0x00000000 },
+ {256, 65536, 6, 0x00000008 },
+ {128, 32768, 5, 0x0000000C },
+ {64, 16384, 4, 0x0000000E },
+ {32, 8192, 3, 0x0000000F }
+};
+
+/* Handle shadow device of the Nvidia NForce3 */
+/* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
+static int __devinit nforce3_agp_init(struct pci_dev *pdev)
+{
+ u32 tmp, apbase, apbar, aplimit;
+ struct pci_dev *dev1;
+ int i;
+ unsigned size = amd64_fetch_size();
+
+ printk(KERN_INFO PFX "Setting up Nforce3 AGP.\n");
+
+ dev1 = pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(11, 0));
+ if (dev1 == NULL) {
+ printk(KERN_INFO PFX "agpgart: Detected an NVIDIA "
+ "nForce3 chipset, but could not find "
+ "the secondary device.\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
+ if (nforce3_sizes[i].size == size)
+ break;
+
+ if (i == ARRAY_SIZE(nforce3_sizes)) {
+ printk(KERN_INFO PFX "No NForce3 size found for %d\n", size);
+ return -ENODEV;
+ }
+
+ pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
+ tmp &= ~(0xf);
+ tmp |= nforce3_sizes[i].size_value;
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
+
+ /* shadow x86-64 registers into NVIDIA registers */
+ pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase);
+
+ /* if x86-64 aperture base is beyond 4G, exit here */
+ if ( (apbase & 0x7fff) >> (32 - 25) )
+ return -ENODEV;
+
+ apbase = (apbase & 0x7fff) << 25;
+
+ pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
+ apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
+ apbar |= apbase;
+ pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);
+
+ aplimit = apbase + (size * 1024 * 1024) - 1;
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
+
+ return 0;
+}
+
+static int __devinit agp_amd64_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* Could check for AGPv3 here */
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
+ amd8151_init(pdev, bridge);
+ } else {
+ printk(KERN_INFO PFX "Detected AGP bridge %x\n", pdev->devfn);
+ }
+
+ bridge->driver = &amd_8151_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+ if (cache_nbs(pdev, cap_ptr) == -1) {
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
+ int ret = nforce3_agp_init(pdev);
+ if (ret) {
+ agp_put_bridge(bridge);
+ return ret;
+ }
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_AL) {
+ int ret = uli_agp_init(pdev);
+ if (ret) {
+ agp_put_bridge(bridge);
+ return ret;
+ }
+ }
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_amd64_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ release_mem_region(virt_to_phys(bridge->gatt_table_real),
+ amd64_aperture_sizes[bridge->aperture_size_idx].size);
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_amd64_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_8151_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* ULi M1689 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = PCI_DEVICE_ID_AL_M1689,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800Pro */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_K8T800PRO_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_8385_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8M800 / K8N800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_8380_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T890 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_3238_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800/K8M800/K8N800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_838X_1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* NForce3 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE3,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE3S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* SIS 755 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_755,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
+
+static struct pci_driver agp_amd64_pci_driver = {
+ .name = "agpgart-amd64",
+ .id_table = agp_amd64_pci_table,
+ .probe = agp_amd64_probe,
+ .remove = agp_amd64_remove,
+};
+
+
+/* Not static due to IOMMU code calling it early. */
+int __init agp_amd64_init(void)
+{
+ int err = 0;
+ static struct pci_device_id amd64nb[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
+ { },
+ };
+
+ if (agp_off)
+ return -EINVAL;
+ if (pci_register_driver(&agp_amd64_pci_driver) > 0) {
+ struct pci_dev *dev;
+ if (!agp_try_unsupported && !agp_try_unsupported_boot) {
+ printk(KERN_INFO PFX "No supported AGP bridge found.\n");
+#ifdef MODULE
+ printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
+#else
+ printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
+#endif
+ return -ENODEV;
+ }
+
+ /* First check that we have at least one AMD64 NB */
+ if (!pci_dev_present(amd64nb))
+ return -ENODEV;
+
+ /* Look for any AGP bridge */
+ dev = NULL;
+ err = -ENODEV;
+ for_each_pci_dev(dev) {
+ if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
+ continue;
+ /* Only one bridge supported right now */
+ if (agp_amd64_probe(dev, NULL) == 0) {
+ err = 0;
+ break;
+ }
+ }
+ }
+ return err;
+}
+
+static void __exit agp_amd64_cleanup(void)
+{
+ if (aperture_resource)
+ release_resource(aperture_resource);
+ pci_unregister_driver(&agp_amd64_pci_driver);
+}
+
+/* On AMD64 the PCI driver needs to initialize this driver early
+ for the IOMMU, so it has to be called via a backdoor. */
+#ifndef CONFIG_GART_IOMMU
+module_init(agp_amd64_init);
+module_exit(agp_amd64_cleanup);
+#endif
+
+MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>, Andi Kleen");
+module_param(agp_try_unsupported, bool, 0);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
new file mode 100644
index 00000000000..757dde006fc
--- /dev/null
+++ b/drivers/char/agp/ati-agp.c
@@ -0,0 +1,548 @@
+/*
+ * ATi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <asm/agp.h>
+#include "agp.h"
+
+#define ATI_GART_MMBASE_ADDR 0x14
+#define ATI_RS100_APSIZE 0xac
+#define ATI_RS100_IG_AGPMODE 0xb0
+#define ATI_RS300_APSIZE 0xf8
+#define ATI_RS300_IG_AGPMODE 0xfc
+#define ATI_GART_FEATURE_ID 0x00
+#define ATI_GART_BASE 0x04
+#define ATI_GART_CACHE_SZBASE 0x08
+#define ATI_GART_CACHE_CNTRL 0x0c
+#define ATI_GART_CACHE_ENTRY_CNTRL 0x10
+
+
+static struct aper_size_info_lvl2 ati_generic_sizes[7] =
+{
+ {2048, 524288, 0x0000000c},
+ {1024, 262144, 0x0000000a},
+ {512, 131072, 0x00000008},
+ {256, 65536, 0x00000006},
+ {128, 32768, 0x00000004},
+ {64, 16384, 0x00000002},
+ {32, 8192, 0x00000000}
+};
+
+static struct gatt_mask ati_generic_masks[] =
+{
+ { .mask = 1, .type = 0}
+};
+
+
+
+typedef struct _ati_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+} ati_page_map;
+
+static struct _ati_generic_private {
+ volatile u8 __iomem *registers;
+ ati_page_map **gatt_pages;
+ int num_tables;
+} ati_generic_private;
+
+static int ati_create_page_map(ati_page_map *page_map)
+{
+ int i, err = 0;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL)
+ return -ENOMEM;
+
+ SetPageReserved(virt_to_page(page_map->real));
+ err = map_page_into_agp(virt_to_page(page_map->real));
+ page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+ PAGE_SIZE);
+ if (page_map->remapped == NULL || err) {
+ ClearPageReserved(virt_to_page(page_map->real));
+ free_page((unsigned long) page_map->real);
+ page_map->real = NULL;
+ return -ENOMEM;
+ }
+ /*CACHE_FLUSH();*/
+ global_cache_flush();
+
+ for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+ readl(page_map->remapped+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+
+static void ati_free_page_map(ati_page_map *page_map)
+{
+ unmap_page_from_agp(virt_to_page(page_map->real));
+ iounmap(page_map->remapped);
+ ClearPageReserved(virt_to_page(page_map->real));
+ free_page((unsigned long) page_map->real);
+}
+
+
+static void ati_free_gatt_pages(void)
+{
+ int i;
+ ati_page_map **tables;
+ ati_page_map *entry;
+
+ tables = ati_generic_private.gatt_pages;
+ for(i = 0; i < ati_generic_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL)
+ ati_free_page_map(entry);
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+}
+
+
+static int ati_create_gatt_pages(int nr_tables)
+{
+ ati_page_map **tables;
+ ati_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kmalloc((nr_tables + 1) * sizeof(ati_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL)
+ return -ENOMEM;
+
+ memset(tables, 0, sizeof(ati_page_map *) * (nr_tables + 1));
+ for (i = 0; i < nr_tables; i++) {
+ entry = kmalloc(sizeof(ati_page_map), GFP_KERNEL);
+ if (entry == NULL) {
+ while (i>0) {
+ kfree (tables[i-1]);
+ i--;
+ }
+ kfree (tables);
+ tables = NULL;
+ retval = -ENOMEM;
+ break;
+ }
+ memset(entry, 0, sizeof(ati_page_map));
+ tables[i] = entry;
+ retval = ati_create_page_map(entry);
+ if (retval != 0) break;
+ }
+ ati_generic_private.num_tables = nr_tables;
+ ati_generic_private.gatt_pages = tables;
+
+ if (retval != 0) ati_free_gatt_pages();
+
+ return retval;
+}
+
+static int is_r200(void)
+{
+ if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250))
+ return 1;
+ return 0;
+}
+
+static int ati_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_lvl2 *values;
+
+ if (is_r200())
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ else
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+
+ temp = (temp & 0x0000000e);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void ati_tlbflush(struct agp_memory * mem)
+{
+ writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
+ readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. */
+}
+
+static void ati_cleanup(void)
+{
+ struct aper_size_info_lvl2 *previous_size;
+ u32 temp;
+
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+
+ /* Write back the previous size and disable gart translation */
+ if (is_r200()) {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
+ } else {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
+ }
+ iounmap((volatile u8 __iomem *)ati_generic_private.registers);
+}
+
+
+static int ati_configure(void)
+{
+ u32 temp;
+
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge->dev, ATI_GART_MMBASE_ADDR, &temp);
+ temp = (temp & 0xfffff000);
+ ati_generic_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+
+ if (is_r200())
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000);
+ else
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);
+
+ /* address to map to */
+ /*
+ pci_read_config_dword(agp_bridge.dev, AGP_APBASE, &temp);
+ agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
+ */
+ writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
+ readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/
+
+ /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
+ pci_read_config_dword(agp_bridge->dev, 4, &temp);
+ pci_write_config_dword(agp_bridge->dev, 4, temp | (1<<14));
+
+ /* Write out the address of the gatt table */
+ writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
+ readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. */
+
+ return 0;
+}
+
+
+/*
+ * Since we don't need contiguous memory we just try
+ * to get the gatt table once.
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef GET_GATT
+#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int ati_insert_memory(struct agp_memory * mem,
+ off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ /*CACHE_FLUSH(); */
+ global_cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
+ int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int ati_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ ati_page_map page_dir;
+ unsigned long addr;
+ int retval;
+ u32 temp;
+ int i;
+ struct aper_size_info_lvl2 *current_size;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = ati_create_page_map(&page_dir);
+ if (retval != 0)
+ return retval;
+
+ retval = ati_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ ati_free_page_map(&page_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
+
+ /* Write out the size register */
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ if (is_r200()) {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+ | 0x00000001);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ } else {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+ | 0x00000001);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ }
+
+ /*
+ * Get the address for the gart region.
+ * This is a bus address even on the alpha, because it's
+ * used to program the agp master, not the cpu.
+ */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = addr;
+
+ /* Calculate the agp offset */
+ for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+ writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1,
+ page_dir.remapped+GET_PAGE_DIR_OFF(addr));
+ readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+static int ati_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ ati_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ ati_free_gatt_pages();
+ ati_free_page_map(&page_dir);
+ return 0;
+}
+
+struct agp_bridge_driver ati_generic_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ati_generic_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = ati_configure,
+ .fetch_size = ati_fetch_size,
+ .cleanup = ati_cleanup,
+ .tlb_flush = ati_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = ati_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = ati_create_gatt_table,
+ .free_gatt_table = ati_free_gatt_table,
+ .insert_memory = ati_insert_memory,
+ .remove_memory = ati_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+
+static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS100,
+ .chipset_name = "IGP320/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS200,
+ .chipset_name = "IGP330/340/345/350/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS200_B,
+ .chipset_name = "IGP345M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS250,
+ .chipset_name = "IGP7000/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_100,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_133,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_166,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_200,
+ .chipset_name = "IGP9100/M",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+static int __devinit agp_ati_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = ati_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name; j++) {
+ if (pdev->device == devs[j].device_id)
+ goto found;
+ }
+
+ printk(KERN_ERR PFX
+ "Unsupported Ati chipset (device id: %04x)\n", pdev->device);
+ return -ENODEV;
+
+found:
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ bridge->driver = &ati_generic_bridge;
+
+
+ printk(KERN_INFO PFX "Detected Ati %s chipset\n",
+ devs[j].chipset_name);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_ati_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_ati_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_ATI,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
+
+static struct pci_driver agp_ati_pci_driver = {
+ .name = "agpgart-ati",
+ .id_table = agp_ati_pci_table,
+ .probe = agp_ati_probe,
+ .remove = agp_ati_remove,
+};
+
+static int __init agp_ati_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_ati_pci_driver);
+}
+
+static void __exit agp_ati_cleanup(void)
+{
+ pci_unregister_driver(&agp_ati_pci_driver);
+}
+
+module_init(agp_ati_init);
+module_exit(agp_ati_cleanup);
+
+MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
new file mode 100644
index 00000000000..c3442f3c648
--- /dev/null
+++ b/drivers/char/agp/backend.c
@@ -0,0 +1,348 @@
+/*
+ * AGPGART driver backend routines.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include "agp.h"
+
+/* Due to XFree86 brain-damage, we can't go to 1.0 until they
+ * fix some real stupidity. It's only by chance we can bump
+ * past 0.99 at all due to some boolean logic error. */
+#define AGPGART_VERSION_MAJOR 0
+#define AGPGART_VERSION_MINOR 101
+static struct agp_version agp_current_version =
+{
+ .major = AGPGART_VERSION_MAJOR,
+ .minor = AGPGART_VERSION_MINOR,
+};
+
+struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
+ &agp_generic_find_bridge;
+
+struct agp_bridge_data *agp_bridge;
+LIST_HEAD(agp_bridges);
+EXPORT_SYMBOL(agp_bridge);
+EXPORT_SYMBOL(agp_bridges);
+EXPORT_SYMBOL(agp_find_bridge);
+
+/**
+ * agp_backend_acquire - attempt to acquire an agp backend.
+ *
+ */
+struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+
+ bridge = agp_find_bridge(pdev);
+
+ if (!bridge)
+ return NULL;
+
+ if (atomic_read(&bridge->agp_in_use))
+ return NULL;
+ atomic_inc(&bridge->agp_in_use);
+ return bridge;
+}
+EXPORT_SYMBOL(agp_backend_acquire);
+
+
+/**
+ * agp_backend_release - release the lock on the agp backend.
+ *
+ * The caller must ensure that the graphics aperture translation table
+ * is ready for use by another entity.
+ *
+ * (Ensure that all memory it bound is unbound.)
+ */
+void agp_backend_release(struct agp_bridge_data *bridge)
+{
+
+ if (bridge)
+ atomic_dec(&bridge->agp_in_use);
+}
+EXPORT_SYMBOL(agp_backend_release);
+
+
+struct { int mem, agp; } maxes_table[] = {
+ {0, 0},
+ {32, 4},
+ {64, 28},
+ {128, 96},
+ {256, 204},
+ {512, 440},
+ {1024, 942},
+ {2048, 1920},
+ {4096, 3932}
+};
+
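+/*
+ * maxes_table maps system RAM (in MB) to a conservative ceiling on AGP
+ * memory (also in MB).  agp_find_max() interpolates linearly between the
+ * surrounding rows and converts the result to pages: for example, with
+ * 384 MB of RAM it yields 204 + (384-256)*(440-204)/(512-256) = 322 MB.
+ */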
+static int agp_find_max(void)
+{
+ long memory, index, result;
+
+#if PAGE_SHIFT < 20
+ memory = num_physpages >> (20 - PAGE_SHIFT);
+#else
+ memory = num_physpages << (PAGE_SHIFT - 20);
+#endif
+ index = 1;
+
+ while ((memory > maxes_table[index].mem) && (index < 8))
+ index++;
+
+ result = maxes_table[index - 1].agp +
+ ( (memory - maxes_table[index - 1].mem) *
+ (maxes_table[index].agp - maxes_table[index - 1].agp)) /
+ (maxes_table[index].mem - maxes_table[index - 1].mem);
+
+ result = result << (20 - PAGE_SHIFT);
+ return result;
+}
+
+
+static int agp_backend_initialize(struct agp_bridge_data *bridge)
+{
+ int size_value, rc, got_gatt=0, got_keylist=0;
+
+ bridge->max_memory_agp = agp_find_max();
+ bridge->version = &agp_current_version;
+
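+ /*
+ * The scratch page is a harmless placeholder: drivers point unbound
+ * GATT entries at it (see the remove_memory paths) so stray GART
+ * accesses land on a known, harmless page.
+ */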
+ if (bridge->driver->needs_scratch_page) {
+ void *addr = bridge->driver->agp_alloc_page(bridge);
+
+ if (!addr) {
+ printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
+ return -ENOMEM;
+ }
+
+ bridge->scratch_page_real = virt_to_phys(addr);
+ bridge->scratch_page =
+ bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
+ }
+
+ size_value = bridge->driver->fetch_size();
+ if (size_value == 0) {
+ printk(KERN_ERR PFX "unable to determine aperture size.\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+ if (bridge->driver->create_gatt_table(bridge)) {
+ printk(KERN_ERR PFX
+ "unable to get memory for graphics translation table.\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ got_gatt = 1;
+
+ bridge->key_list = vmalloc(PAGE_SIZE * 4);
+ if (bridge->key_list == NULL) {
+ printk(KERN_ERR PFX "error allocating memory for key lists.\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ got_keylist = 1;
+
+ /* FIXME vmalloc'd memory not guaranteed contiguous */
+ memset(bridge->key_list, 0, PAGE_SIZE * 4);
+
+ if (bridge->driver->configure()) {
+ printk(KERN_ERR PFX "error configuring host chipset.\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ if (bridge->driver->needs_scratch_page)
+ bridge->driver->agp_destroy_page(
+ phys_to_virt(bridge->scratch_page_real));
+ if (got_gatt)
+ bridge->driver->free_gatt_table(bridge);
+ if (got_keylist) {
+ vfree(bridge->key_list);
+ bridge->key_list = NULL;
+ }
+ return rc;
+}
+
+/* cannot be __exit because it could be called from __init code */
+static void agp_backend_cleanup(struct agp_bridge_data *bridge)
+{
+ if (bridge->driver->cleanup)
+ bridge->driver->cleanup();
+ if (bridge->driver->free_gatt_table)
+ bridge->driver->free_gatt_table(bridge);
+ if (bridge->key_list) {
+ vfree(bridge->key_list);
+ bridge->key_list = NULL;
+ }
+
+ if (bridge->driver->agp_destroy_page &&
+ bridge->driver->needs_scratch_page)
+ bridge->driver->agp_destroy_page(
+ phys_to_virt(bridge->scratch_page_real));
+}
+
+/* When we remove the global variable agp_bridge from all drivers,
+ * agp_alloc_bridge and agp_generic_find_bridge will need to be updated
+ */
+
+struct agp_bridge_data *agp_alloc_bridge(void)
+{
+ struct agp_bridge_data *bridge = kmalloc(sizeof(*bridge), GFP_KERNEL);
+
+ if (!bridge)
+ return NULL;
+
+ memset(bridge, 0, sizeof(*bridge));
+ atomic_set(&bridge->agp_in_use, 0);
+ atomic_set(&bridge->current_memory_agp, 0);
+
+ if (list_empty(&agp_bridges))
+ agp_bridge = bridge;
+
+ return bridge;
+}
+EXPORT_SYMBOL(agp_alloc_bridge);
+
+
+void agp_put_bridge(struct agp_bridge_data *bridge)
+{
+ kfree(bridge);
+
+ if (list_empty(&agp_bridges))
+ agp_bridge = NULL;
+}
+EXPORT_SYMBOL(agp_put_bridge);
+
+
+int agp_add_bridge(struct agp_bridge_data *bridge)
+{
+ int error;
+
+ if (agp_off)
+ return -ENODEV;
+
+ if (!bridge->dev) {
+ printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
+ return -EINVAL;
+ }
+
+ /* Grab reference on the chipset driver. */
+ if (!try_module_get(bridge->driver->owner)) {
+ printk (KERN_INFO PFX "Couldn't lock chipset driver.\n");
+ return -EINVAL;
+ }
+
+ error = agp_backend_initialize(bridge);
+ if (error) {
+ printk (KERN_INFO PFX "agp_backend_initialize() failed.\n");
+ goto err_out;
+ }
+
+ if (list_empty(&agp_bridges)) {
+ error = agp_frontend_initialize();
+ if (error) {
+ printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n");
+ goto frontend_err;
+ }
+
+ printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
+ bridge->driver->fetch_size(), bridge->gart_bus_addr);
+
+ }
+
+ list_add(&bridge->list, &agp_bridges);
+ return 0;
+
+frontend_err:
+ agp_backend_cleanup(bridge);
+err_out:
+ module_put(bridge->driver->owner);
+ agp_put_bridge(bridge);
+ return error;
+}
+EXPORT_SYMBOL_GPL(agp_add_bridge);
+
+
+void agp_remove_bridge(struct agp_bridge_data *bridge)
+{
+ agp_backend_cleanup(bridge);
+ list_del(&bridge->list);
+ if (list_empty(&agp_bridges))
+ agp_frontend_cleanup();
+ module_put(bridge->driver->owner);
+}
+EXPORT_SYMBOL_GPL(agp_remove_bridge);
+
+int agp_off;
+int agp_try_unsupported_boot;
+EXPORT_SYMBOL(agp_off);
+EXPORT_SYMBOL(agp_try_unsupported_boot);
+
+static int __init agp_init(void)
+{
+ if (!agp_off)
+ printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Dave Jones\n",
+ AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
+ return 0;
+}
+
+void __exit agp_exit(void)
+{
+}
+
+#ifndef MODULE
+static __init int agp_setup(char *s)
+{
+ if (!strcmp(s,"off"))
+ agp_off = 1;
+ if (!strcmp(s,"try_unsupported"))
+ agp_try_unsupported_boot = 1;
+ return 1;
+}
+__setup("agp=", agp_setup);
+#endif
+
+MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_DESCRIPTION("AGP GART driver");
+MODULE_LICENSE("GPL and additional rights");
+MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
+
+module_init(agp_init);
+module_exit(agp_exit);
+
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
new file mode 100644
index 00000000000..52c0a097118
--- /dev/null
+++ b/drivers/char/agp/efficeon-agp.c
@@ -0,0 +1,463 @@
+/*
+ * Transmeta's Efficeon AGPGART driver.
+ *
+ * Based upon a diff by Linus around November '02.
+ *
+ * Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com>
+ * and H. Peter Anvin <hpa@transmeta.com>.
+ */
+
+/*
+ * NOTE-cpg-040217:
+ *
+ * - when compiled as a module, after loading the module,
+ * it will refuse to unload, indicating it is in use,
+ * when it is not.
+ * - no s3 (suspend to ram) testing.
+ * - tested on the efficeon integrated northbridge for tens
+ * of iterations of starting x and glxgears.
+ * - tested with radeon 9000 and radeon mobility m9 cards
+ * - tested with c3/c4 enabled (with the mobility m9 card)
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/gfp.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include "agp.h"
+
+/*
+ * The real difference from the generic AGP code is
+ * in the GART mappings - a two-level setup with the
+ * first level being an on-chip 64-entry table.
+ *
+ * The page array is filled through the ATTPAGE register
+ * (Aperture Translation Table Page Register) at 0xB8. Bits:
+ * 31:20: physical page address
+ * 11:9: Page Attribute Table Index (PATI)
+ * must match the PAT index for the
+ * mapped pages (the 2nd level page table pages
+ * themselves should be just regular WB-cacheable,
+ * so this is normally zero.)
+ * 8: Present
+ * 7:6: reserved, write as zero
+ * 5:0: GATT directory index: which 1st-level entry
+ *
+ * The Efficeon AGP spec requires pages to be WB-cacheable
+ * but to be explicitly CLFLUSH'd after any changes.
+ */
+#define EFFICEON_ATTPAGE 0xb8
+#define EFFICEON_L1_SIZE 64 /* Number of PDE pages */
+
+#define EFFICEON_PATI (0 << 9)
+#define EFFICEON_PRESENT (1 << 8)
+
+static struct _efficeon_private {
+ unsigned long l1_table[EFFICEON_L1_SIZE];
+} efficeon_private;
+
+static struct gatt_mask efficeon_generic_masks[] =
+{
+ {.mask = 0x00000001, .type = 0}
+};
+
+static struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
+{
+ {256, 65536, 0},
+ {128, 32768, 32},
+ {64, 16384, 48},
+ {32, 8192, 56}
+};
+
+/*
+ * Control interfaces are largely identical to
+ * the legacy Intel 440BX.
+ */
+
+static int efficeon_fetch_size(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_lvl2 *values;
+
+ pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void efficeon_tlbflush(struct agp_memory * mem)
+{
+ printk(KERN_DEBUG PFX "efficeon_tlbflush()\n");
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+static void efficeon_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_lvl2 *previous_size;
+
+ printk(KERN_DEBUG PFX "efficeon_cleanup()\n");
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
+ previous_size->size_value);
+}
+
+static int efficeon_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_lvl2 *current_size;
+
+ printk(KERN_DEBUG PFX "efficeon_configure()\n");
+
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+ /* paccfg/nbxcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+ (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11));
+ /* clear any possible error conditions */
+ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+ return 0;
+}
+
+static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int index, freed = 0;
+
+ for (index = 0; index < EFFICEON_L1_SIZE; index++) {
+ unsigned long page = efficeon_private.l1_table[index];
+ if (page) {
+ efficeon_private.l1_table[index] = 0;
+ ClearPageReserved(virt_to_page((char *)page));
+ free_page(page);
+ freed++;
+ }
+ printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n",
+ agp_bridge->dev, EFFICEON_ATTPAGE, index);
+ pci_write_config_dword(agp_bridge->dev,
+ EFFICEON_ATTPAGE, index);
+ }
+ printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);
+ return 0;
+}
+
+
+/*
+ * Since we don't need contiguous memory, we just try
+ * to get the GATT table once
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef GET_GATT
+#define GET_GATT(addr) (efficeon_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int index;
+ const int pati = EFFICEON_PATI;
+ const int present = EFFICEON_PRESENT;
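+ /* CPUID leaf 1, EBX bits 15:8 report the CLFLUSH line size in 8-byte
+ * units, so <<3 converts it to bytes (e.g. 8 -> 64-byte lines). */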
+ const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
+ int num_entries, l1_pages;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);
+
+ /* Each level-1 entry covers 2^10 GATT entries (one PTE page) */
+ BUG_ON(num_entries & 0x3ff);
+ l1_pages = num_entries >> 10;
+
+ for (index = 0 ; index < l1_pages ; index++) {
+ int offset;
+ unsigned long page;
+ unsigned long value;
+
+ page = efficeon_private.l1_table[index];
+ BUG_ON(page);
+
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page) {
+ efficeon_free_gatt_table(agp_bridge);
+ return -ENOMEM;
+ }
+ SetPageReserved(virt_to_page((char *)page));
+
+ for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
+ asm volatile("clflush %0" : : "m" (*(char *)(page+offset)));
+
+ efficeon_private.l1_table[index] = page;
+
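+ /* Illustration: a PTE page at physical 0x12345000 in directory
+ * slot 3 (PATI 0) encodes as 0x12345000 | (1<<8) | 3 = 0x12345103. */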
+ value = __pa(page) | pati | present | index;
+
+ pci_write_config_dword(agp_bridge->dev,
+ EFFICEON_ATTPAGE, value);
+ }
+
+ return 0;
+}
+
+static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int i, count = mem->page_count, num_entries;
+ unsigned int *page, *last_page;
+ const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
+ const unsigned long clflush_mask = ~(clflush_chunk-1);
+
+ printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ if (mem->is_flushed == FALSE) {
+ global_cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
+ last_page = NULL;
+ for (i = 0; i < count; i++) {
+ int index = pg_start + i;
+ unsigned long insert = mem->memory[i];
+
+ page = (unsigned int *) efficeon_private.l1_table[index >> 10];
+
+ if (!page)
+ continue;
+
+ page += (index & 0x3ff);
+ *page = insert;
+
+ /* clflush is slow, so don't clflush until we have to */
+ if ( last_page &&
+ ((unsigned long)page^(unsigned long)last_page) & clflush_mask )
+ asm volatile("clflush %0" : : "m" (*last_page));
+
+ last_page = page;
+ }
+
+ if ( last_page )
+ asm volatile("clflush %0" : : "m" (*last_page));
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int i, count = mem->page_count, num_entries;
+
+ printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ int index = pg_start + i;
+ unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];
+
+ if (!page)
+ continue;
+ page += (index & 0x3ff);
+ *page = 0;
+ }
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+struct agp_bridge_driver efficeon_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = efficeon_generic_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .configure = efficeon_configure,
+ .fetch_size = efficeon_fetch_size,
+ .cleanup = efficeon_cleanup,
+ .tlb_flush = efficeon_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = efficeon_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+
+ // Efficeon-specific GATT table setup / populate / teardown
+ .create_gatt_table = efficeon_create_gatt_table,
+ .free_gatt_table = efficeon_free_gatt_table,
+ .insert_memory = efficeon_insert_memory,
+ .remove_memory = efficeon_remove_memory,
+ .cant_use_aperture = 0, // 1 might be faster?
+
+ // Generic
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+
+static int agp_efficeon_resume(struct pci_dev *pdev)
+{
+ printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
+ return efficeon_configure();
+}
+
+static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ struct resource *r;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* Probe for Efficeon controller */
+ if (pdev->device != PCI_DEVICE_ID_EFFICEON) {
+ printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n",
+ pdev->device);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n");
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &efficeon_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /*
+ * The following fixes the case where the BIOS has "forgotten" to
+ * provide an address range for the GART.
+ * 20030610 - hamish@zot.org
+ */
+ r = &pdev->resource[0];
+ if (!r->start && r->end) {
+ if (pci_assign_resource(pdev, 0)) {
+ printk(KERN_ERR PFX "could not assign resource 0\n");
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * If the device has not been properly setup, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR PFX "Unable to Enable PCI device\n");
+ return -ENODEV;
+ }
+
+ /* Fill in the mode register */
+ if (cap_ptr) {
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+ }
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static int agp_efficeon_suspend(struct pci_dev *dev, u32 state)
+{
+ return 0;
+}
+
+
+static struct pci_device_id agp_efficeon_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_TRANSMETA,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
+
+static struct pci_driver agp_efficeon_pci_driver = {
+ .name = "agpgart-efficeon",
+ .id_table = agp_efficeon_pci_table,
+ .probe = agp_efficeon_probe,
+ .remove = agp_efficeon_remove,
+ .suspend = agp_efficeon_suspend,
+ .resume = agp_efficeon_resume,
+};
+
+static int __init agp_efficeon_init(void)
+{
+ static int agp_initialised=0;
+
+ if (agp_off)
+ return -EINVAL;
+
+ if (agp_initialised == 1)
+ return 0;
+ agp_initialised=1;
+
+ return pci_register_driver(&agp_efficeon_pci_driver);
+}
+
+static void __exit agp_efficeon_cleanup(void)
+{
+ pci_unregister_driver(&agp_efficeon_pci_driver);
+}
+
+module_init(agp_efficeon_init);
+module_exit(agp_efficeon_cleanup);
+
+MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
new file mode 100644
index 00000000000..f633623ac80
--- /dev/null
+++ b/drivers/char/agp/frontend.c
@@ -0,0 +1,1103 @@
+/*
+ * AGPGART driver frontend
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include "agp.h"
+
+static struct agp_front_data agp_fe;
+
+static struct agp_memory *agp_find_mem_by_key(int key)
+{
+ struct agp_memory *curr;
+
+ if (agp_fe.current_controller == NULL)
+ return NULL;
+
+ curr = agp_fe.current_controller->pool;
+
+ while (curr != NULL) {
+ if (curr->key == key)
+ break;
+ curr = curr->next;
+ }
+
+ DBG("key=%d -> mem=%p", key, curr);
+ return curr;
+}
+
+static void agp_remove_from_pool(struct agp_memory *temp)
+{
+ struct agp_memory *prev;
+ struct agp_memory *next;
+
+ /* Check to see if this is even in the memory pool */
+
+ DBG("mem=%p", temp);
+ if (agp_find_mem_by_key(temp->key) != NULL) {
+ next = temp->next;
+ prev = temp->prev;
+
+ if (prev != NULL) {
+ prev->next = next;
+ if (next != NULL)
+ next->prev = prev;
+
+ } else {
+ /* This is the first item on the list */
+ if (next != NULL)
+ next->prev = NULL;
+
+ agp_fe.current_controller->pool = next;
+ }
+ }
+}
+
+/*
+ * Routines for managing each client's segment list -
+ * These routines handle adding and removing segments
+ * for each auth'ed client.
+ */
+
+static struct
+agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
+ unsigned long offset,
+ int size, pgprot_t page_prot)
+{
+ struct agp_segment_priv *seg;
+ int num_segments, i;
+ off_t pg_start;
+ size_t pg_count;
+
+ pg_start = offset / 4096;
+ pg_count = size / 4096;
+ seg = *(client->segments);
+ num_segments = client->num_segments;
+
+ for (i = 0; i < client->num_segments; i++) {
+ if ((seg[i].pg_start == pg_start) &&
+ (seg[i].pg_count == pg_count) &&
+ (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
+ return seg + i;
+ }
+ }
+
+ return NULL;
+}
+
+static void agp_remove_seg_from_client(struct agp_client *client)
+{
+ DBG("client=%p", client);
+
+ if (client->segments != NULL) {
+ if (*(client->segments) != NULL) {
+ DBG("Freeing %p from client %p", *(client->segments), client);
+ kfree(*(client->segments));
+ }
+ DBG("Freeing %p from client %p", client->segments, client);
+ kfree(client->segments);
+ client->segments = NULL;
+ }
+}
+
+static void agp_add_seg_to_client(struct agp_client *client,
+ struct agp_segment_priv ** seg, int num_segments)
+{
+ struct agp_segment_priv **prev_seg;
+
+ prev_seg = client->segments;
+
+ if (prev_seg != NULL)
+ agp_remove_seg_from_client(client);
+
+ DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
+ client->num_segments = num_segments;
+ client->segments = seg;
+}
+
+/* Originally taken from the protection_map array in linux/mm/mmap.c.
+ * The original really should be exported to modules, or there should be
+ * some routine which does the conversion for you.
+ */
+
+static const pgprot_t my_protect_map[16] =
+{
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+};
+
+static pgprot_t agp_convert_mmap_flags(int prot)
+{
+#define _trans(x,bit1,bit2) \
+((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
+
+ unsigned long prot_bits;
+ pgprot_t temp;
+
+ prot_bits = _trans(prot, PROT_READ, VM_READ) |
+ _trans(prot, PROT_WRITE, VM_WRITE) |
+ _trans(prot, PROT_EXEC, VM_EXEC);
+
+ prot_bits |= VM_SHARED;
+
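+ /* VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupy the low four bits,
+ * so they index directly into the 16-entry protection map above. */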
+ temp = my_protect_map[prot_bits & 0x0000000f];
+
+ return temp;
+}
+
+static int agp_create_segment(struct agp_client *client, struct agp_region *region)
+{
+ struct agp_segment_priv **ret_seg;
+ struct agp_segment_priv *seg;
+ struct agp_segment *user_seg;
+ size_t i;
+
+ seg = kmalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
+ if (seg == NULL) {
+ kfree(region->seg_list);
+ region->seg_list = NULL;
+ return -ENOMEM;
+ }
+ memset(seg, 0, (sizeof(struct agp_segment_priv) * region->seg_count));
+ user_seg = region->seg_list;
+
+ for (i = 0; i < region->seg_count; i++) {
+ seg[i].pg_start = user_seg[i].pg_start;
+ seg[i].pg_count = user_seg[i].pg_count;
+ seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
+ }
+ kfree(region->seg_list);
+ region->seg_list = NULL;
+
+ ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
+ if (ret_seg == NULL) {
+ kfree(seg);
+ return -ENOMEM;
+ }
+ *ret_seg = seg;
+ agp_add_seg_to_client(client, ret_seg, region->seg_count);
+ return 0;
+}
+
+/* End - Routines for managing each client's segment list */
+
+/* This function must only be called when current_controller != NULL */
+static void agp_insert_into_pool(struct agp_memory * temp)
+{
+ struct agp_memory *prev;
+
+ prev = agp_fe.current_controller->pool;
+
+ if (prev != NULL) {
+ prev->prev = temp;
+ temp->next = prev;
+ }
+ agp_fe.current_controller->pool = temp;
+}
+
+
+/* File private list routines */
+
+struct agp_file_private *agp_find_private(pid_t pid)
+{
+ struct agp_file_private *curr;
+
+ curr = agp_fe.file_priv_list;
+
+ while (curr != NULL) {
+ if (curr->my_pid == pid)
+ return curr;
+ curr = curr->next;
+ }
+
+ return NULL;
+}
+
+void agp_insert_file_private(struct agp_file_private * priv)
+{
+ struct agp_file_private *prev;
+
+ prev = agp_fe.file_priv_list;
+
+ if (prev != NULL)
+ prev->prev = priv;
+ priv->next = prev;
+ agp_fe.file_priv_list = priv;
+}
+
+void agp_remove_file_private(struct agp_file_private * priv)
+{
+ struct agp_file_private *next;
+ struct agp_file_private *prev;
+
+ next = priv->next;
+ prev = priv->prev;
+
+ if (prev != NULL) {
+ prev->next = next;
+
+ if (next != NULL)
+ next->prev = prev;
+
+ } else {
+ if (next != NULL)
+ next->prev = NULL;
+
+ agp_fe.file_priv_list = next;
+ }
+}
+
+/* End - File private list routines */
+
+/*
+ * Wrappers for agp_free_memory & agp_allocate_memory
+ * These make sure that internal lists are kept updated.
+ */
+static void agp_free_memory_wrap(struct agp_memory *memory)
+{
+ agp_remove_from_pool(memory);
+ agp_free_memory(memory);
+}
+
+static struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
+{
+ struct agp_memory *memory;
+
+ memory = agp_allocate_memory(agp_bridge, pg_count, type);
+ if (memory == NULL)
+ return NULL;
+
+ agp_insert_into_pool(memory);
+ return memory;
+}
+
+/* Routines for managing the list of controllers -
+ * These routines manage the current controller, and the list of
+ * controllers
+ */
+
+static struct agp_controller *agp_find_controller_by_pid(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = agp_fe.controllers;
+
+ while (controller != NULL) {
+ if (controller->pid == id)
+ return controller;
+ controller = controller->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_controller *agp_create_controller(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = kmalloc(sizeof(struct agp_controller), GFP_KERNEL);
+
+ if (controller == NULL)
+ return NULL;
+
+ memset(controller, 0, sizeof(struct agp_controller));
+ controller->pid = id;
+
+ return controller;
+}
+
+static int agp_insert_controller(struct agp_controller *controller)
+{
+ struct agp_controller *prev_controller;
+
+ prev_controller = agp_fe.controllers;
+ controller->next = prev_controller;
+
+ if (prev_controller != NULL)
+ prev_controller->prev = controller;
+
+ agp_fe.controllers = controller;
+
+ return 0;
+}
+
+static void agp_remove_all_clients(struct agp_controller *controller)
+{
+ struct agp_client *client;
+ struct agp_client *temp;
+
+ client = controller->clients;
+
+ while (client) {
+ struct agp_file_private *priv;
+
+ temp = client;
+ agp_remove_seg_from_client(temp);
+ priv = agp_find_private(temp->pid);
+
+ if (priv != NULL) {
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ }
+ client = client->next;
+ kfree(temp);
+ }
+}
+
+static void agp_remove_all_memory(struct agp_controller *controller)
+{
+ struct agp_memory *memory;
+ struct agp_memory *temp;
+
+ memory = controller->pool;
+
+ while (memory) {
+ temp = memory;
+ memory = memory->next;
+ agp_free_memory_wrap(temp);
+ }
+}
+
+static int agp_remove_controller(struct agp_controller *controller)
+{
+ struct agp_controller *prev_controller;
+ struct agp_controller *next_controller;
+
+ prev_controller = controller->prev;
+ next_controller = controller->next;
+
+ if (prev_controller != NULL) {
+ prev_controller->next = next_controller;
+ if (next_controller != NULL)
+ next_controller->prev = prev_controller;
+
+ } else {
+ if (next_controller != NULL)
+ next_controller->prev = NULL;
+
+ agp_fe.controllers = next_controller;
+ }
+
+ agp_remove_all_memory(controller);
+ agp_remove_all_clients(controller);
+
+ if (agp_fe.current_controller == controller) {
+ agp_fe.current_controller = NULL;
+ agp_fe.backend_acquired = FALSE;
+ agp_backend_release(agp_bridge);
+ }
+ kfree(controller);
+ return 0;
+}
+
+static void agp_controller_make_current(struct agp_controller *controller)
+{
+ struct agp_client *clients;
+
+ clients = controller->clients;
+
+ while (clients != NULL) {
+ struct agp_file_private *priv;
+
+ priv = agp_find_private(clients->pid);
+
+ if (priv != NULL) {
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ }
+ clients = clients->next;
+ }
+
+ agp_fe.current_controller = controller;
+}
+
+static void agp_controller_release_current(struct agp_controller *controller,
+ struct agp_file_private *controller_priv)
+{
+ struct agp_client *clients;
+
+ clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
+ clients = controller->clients;
+
+ while (clients != NULL) {
+ struct agp_file_private *priv;
+
+ priv = agp_find_private(clients->pid);
+
+ if (priv != NULL)
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+
+ clients = clients->next;
+ }
+
+ agp_fe.current_controller = NULL;
+ agp_fe.used_by_controller = FALSE;
+ agp_backend_release(agp_bridge);
+}
+
+/*
+ * Routines for managing client lists -
+ * These routines are for managing the list of auth'ed clients.
+ */
+
+static struct agp_client
+*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
+{
+ struct agp_client *client;
+
+ if (controller == NULL)
+ return NULL;
+
+ client = controller->clients;
+
+ while (client != NULL) {
+ if (client->pid == id)
+ return client;
+ client = client->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_controller *agp_find_controller_for_client(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = agp_fe.controllers;
+
+ while (controller != NULL) {
+ if ((agp_find_client_in_controller(controller, id)) != NULL)
+ return controller;
+ controller = controller->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_client *agp_find_client_by_pid(pid_t id)
+{
+ struct agp_client *temp;
+
+ if (agp_fe.current_controller == NULL)
+ return NULL;
+
+ temp = agp_find_client_in_controller(agp_fe.current_controller, id);
+ return temp;
+}
+
+static void agp_insert_client(struct agp_client *client)
+{
+ struct agp_client *prev_client;
+
+ prev_client = agp_fe.current_controller->clients;
+ client->next = prev_client;
+
+ if (prev_client != NULL)
+ prev_client->prev = client;
+
+ agp_fe.current_controller->clients = client;
+ agp_fe.current_controller->num_clients++;
+}
+
+static struct agp_client *agp_create_client(pid_t id)
+{
+ struct agp_client *new_client;
+
+ new_client = kmalloc(sizeof(struct agp_client), GFP_KERNEL);
+
+ if (new_client == NULL)
+ return NULL;
+
+ memset(new_client, 0, sizeof(struct agp_client));
+ new_client->pid = id;
+ agp_insert_client(new_client);
+ return new_client;
+}
+
+static int agp_remove_client(pid_t id)
+{
+ struct agp_client *client;
+ struct agp_client *prev_client;
+ struct agp_client *next_client;
+ struct agp_controller *controller;
+
+ controller = agp_find_controller_for_client(id);
+ if (controller == NULL)
+ return -EINVAL;
+
+ client = agp_find_client_in_controller(controller, id);
+ if (client == NULL)
+ return -EINVAL;
+
+ prev_client = client->prev;
+ next_client = client->next;
+
+ if (prev_client != NULL) {
+ prev_client->next = next_client;
+ if (next_client != NULL)
+ next_client->prev = prev_client;
+
+ } else {
+ if (next_client != NULL)
+ next_client->prev = NULL;
+ controller->clients = next_client;
+ }
+
+ controller->num_clients--;
+ agp_remove_seg_from_client(client);
+ kfree(client);
+ return 0;
+}
+
+/* End - Routines for managing client lists */
+
+/* File Operations */
+
+static int agp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned int size, current_size;
+ unsigned long offset;
+ struct agp_client *client;
+ struct agp_file_private *priv = file->private_data;
+ struct agp_kern_info kerninfo;
+
+ down(&(agp_fe.agp_mutex));
+
+ if (agp_fe.backend_acquired != TRUE)
+ goto out_eperm;
+
+ if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
+ goto out_eperm;
+
+ agp_copy_info(agp_bridge, &kerninfo);
+ size = vma->vm_end - vma->vm_start;
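+ /* aper_size is reported in megabytes; convert it to bytes. */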
+ current_size = kerninfo.aper_size;
+ current_size = current_size * 0x100000;
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ DBG("%lx:%lx", offset, offset+size);
+
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
+ if ((size + offset) > current_size)
+ goto out_inval;
+
+ client = agp_find_client_by_pid(current->pid);
+
+ if (client == NULL)
+ goto out_eperm;
+
+ if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
+ goto out_inval;
+
+ DBG("client vm_ops=%p", kerninfo.vm_ops);
+ if (kerninfo.vm_ops) {
+ vma->vm_ops = kerninfo.vm_ops;
+ } else if (io_remap_pfn_range(vma, vma->vm_start,
+ (kerninfo.aper_base + offset) >> PAGE_SHIFT,
+ size, vma->vm_page_prot)) {
+ goto out_again;
+ }
+ up(&(agp_fe.agp_mutex));
+ return 0;
+ }
+
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+ if (size != current_size)
+ goto out_inval;
+
+ DBG("controller vm_ops=%p", kerninfo.vm_ops);
+ if (kerninfo.vm_ops) {
+ vma->vm_ops = kerninfo.vm_ops;
+ } else if (io_remap_pfn_range(vma, vma->vm_start,
+ kerninfo.aper_base >> PAGE_SHIFT,
+ size, vma->vm_page_prot)) {
+ goto out_again;
+ }
+ up(&(agp_fe.agp_mutex));
+ return 0;
+ }
+
+out_eperm:
+ up(&(agp_fe.agp_mutex));
+ return -EPERM;
+
+out_inval:
+ up(&(agp_fe.agp_mutex));
+ return -EINVAL;
+
+out_again:
+ up(&(agp_fe.agp_mutex));
+ return -EAGAIN;
+}
+
+static int agp_release(struct inode *inode, struct file *file)
+{
+ struct agp_file_private *priv = file->private_data;
+
+ down(&(agp_fe.agp_mutex));
+
+ DBG("priv=%p", priv);
+
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+ struct agp_controller *controller;
+
+ controller = agp_find_controller_by_pid(priv->my_pid);
+
+ if (controller != NULL) {
+ if (controller == agp_fe.current_controller)
+ agp_controller_release_current(controller, priv);
+ agp_remove_controller(controller);
+ controller = NULL;
+ }
+ }
+
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
+ agp_remove_client(priv->my_pid);
+
+ agp_remove_file_private(priv);
+ kfree(priv);
+ file->private_data = NULL;
+ up(&(agp_fe.agp_mutex));
+ return 0;
+}
+
+static int agp_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct agp_file_private *priv;
+ struct agp_client *client;
+ int rc = -ENXIO;
+
+ down(&(agp_fe.agp_mutex));
+
+ if (minor != AGPGART_MINOR)
+ goto err_out;
+
+ priv = kmalloc(sizeof(struct agp_file_private), GFP_KERNEL);
+ if (priv == NULL)
+ goto err_out_nomem;
+
+ memset(priv, 0, sizeof(struct agp_file_private));
+ set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
+ priv->my_pid = current->pid;
+
+ if ((current->uid == 0) || (current->suid == 0)) {
+ /* Root priv, can be controller */
+ set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
+ }
+ client = agp_find_client_by_pid(current->pid);
+
+ if (client != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ }
+ file->private_data = (void *) priv;
+ agp_insert_file_private(priv);
+ DBG("private=%p, client=%p", priv, client);
+ up(&(agp_fe.agp_mutex));
+ return 0;
+
+err_out_nomem:
+ rc = -ENOMEM;
+err_out:
+ up(&(agp_fe.agp_mutex));
+ return rc;
+}
+
+
+static ssize_t agp_read(struct file *file, char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ return -EINVAL;
+}
+
+static ssize_t agp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ return -EINVAL;
+}
+
+static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_info userinfo;
+ struct agp_kern_info kerninfo;
+
+ agp_copy_info(agp_bridge, &kerninfo);
+
+ userinfo.version.major = kerninfo.version.major;
+ userinfo.version.minor = kerninfo.version.minor;
+ userinfo.bridge_id = kerninfo.device->vendor |
+ (kerninfo.device->device << 16);
+ userinfo.agp_mode = kerninfo.mode;
+ userinfo.aper_base = kerninfo.aper_base;
+ userinfo.aper_size = kerninfo.aper_size;
+ userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+ userinfo.pg_used = kerninfo.current_memory;
+
+ if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int agpioc_acquire_wrap(struct agp_file_private *priv)
+{
+ struct agp_controller *controller;
+
+ DBG("");
+
+ if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
+ return -EPERM;
+
+ if (agp_fe.current_controller != NULL)
+ return -EBUSY;
+
+ if (!agp_bridge)
+ return -ENODEV;
+
+ if (atomic_read(&agp_bridge->agp_in_use))
+ return -EBUSY;
+
+ atomic_inc(&agp_bridge->agp_in_use);
+
+ agp_fe.backend_acquired = TRUE;
+
+ controller = agp_find_controller_by_pid(priv->my_pid);
+
+ if (controller != NULL) {
+ agp_controller_make_current(controller);
+ } else {
+ controller = agp_create_controller(priv->my_pid);
+
+ if (controller == NULL) {
+ agp_fe.backend_acquired = FALSE;
+ agp_backend_release(agp_bridge);
+ return -ENOMEM;
+ }
+ agp_insert_controller(controller);
+ agp_controller_make_current(controller);
+ }
+
+ set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ return 0;
+}
+
+static int agpioc_release_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ agp_controller_release_current(agp_fe.current_controller, priv);
+ return 0;
+}
+
+static int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_setup mode;
+
+ DBG("");
+ if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
+ return -EFAULT;
+
+ agp_enable(agp_bridge, mode.agp_mode);
+ return 0;
+}
+
+static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_region reserve;
+ struct agp_client *client;
+ struct agp_file_private *client_priv;
+
+ DBG("");
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+
+ if (reserve.seg_count == 0) {
+ /* remove a client */
+ client_priv = agp_find_private(reserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ if (client == NULL) {
+ /* client is already removed */
+ return 0;
+ }
+ return agp_remove_client(reserve.pid);
+ } else {
+ struct agp_segment *segment;
+
+ if (reserve.seg_count >= 16384)
+ return -EINVAL;
+
+ segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
+ GFP_KERNEL);
+
+ if (segment == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(segment, (void __user *) reserve.seg_list,
+ sizeof(struct agp_segment) * reserve.seg_count)) {
+ kfree(segment);
+ return -EFAULT;
+ }
+ reserve.seg_list = segment;
+
+ if (client == NULL) {
+ /* Create the client and add the segment */
+ client = agp_create_client(reserve.pid);
+
+ if (client == NULL) {
+ kfree(segment);
+ return -ENOMEM;
+ }
+ client_priv = agp_find_private(reserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ }
+ return agp_create_segment(client, &reserve);
+ }
+ /* Will never really happen */
+ return -EINVAL;
+}
+
+static int agpioc_protect_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ /* This function is not currently implemented */
+ return -EINVAL;
+}
+
+static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_allocate alloc;
+
+ DBG("");
+ if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
+ return -EFAULT;
+
+ memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+ if (memory == NULL)
+ return -ENOMEM;
+
+ alloc.key = memory->key;
+ alloc.physical = memory->physical;
+
+ if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
+ agp_free_memory_wrap(memory);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
+{
+ struct agp_memory *memory;
+
+ DBG("");
+ memory = agp_find_mem_by_key(arg);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ agp_free_memory_wrap(memory);
+ return 0;
+}
+
+static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_bind bind_info;
+ struct agp_memory *memory;
+
+ DBG("");
+ if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(bind_info.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_unbind unbind;
+
+ DBG("");
+ if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(unbind.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_unbind_memory(memory);
+}
+
+static int agp_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct agp_file_private *curr_priv = file->private_data;
+ int ret_val = -ENOTTY;
+
+ DBG("priv=%p, cmd=%x", curr_priv, cmd);
+ down(&(agp_fe.agp_mutex));
+
+ if ((agp_fe.current_controller == NULL) &&
+ (cmd != AGPIOC_ACQUIRE)) {
+ ret_val = -EINVAL;
+ goto ioctl_out;
+ }
+ if ((agp_fe.backend_acquired != TRUE) &&
+ (cmd != AGPIOC_ACQUIRE)) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ if (cmd != AGPIOC_ACQUIRE) {
+ if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
+ ret_val = -EPERM;
+ goto ioctl_out;
+ }
+ /* Use the original pid of the controller,
+ * in case it's threaded */
+
+ if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ }
+
+ switch (cmd) {
+ case AGPIOC_INFO:
+ ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_ACQUIRE:
+ ret_val = agpioc_acquire_wrap(curr_priv);
+ break;
+
+ case AGPIOC_RELEASE:
+ ret_val = agpioc_release_wrap(curr_priv);
+ break;
+
+ case AGPIOC_SETUP:
+ ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_RESERVE:
+ ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_PROTECT:
+ ret_val = agpioc_protect_wrap(curr_priv);
+ break;
+
+ case AGPIOC_ALLOCATE:
+ ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_DEALLOCATE:
+ ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
+ break;
+
+ case AGPIOC_BIND:
+ ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_UNBIND:
+ ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
+ break;
+ }
+
+ioctl_out:
+ DBG("ioctl returns %d\n", ret_val);
+ up(&(agp_fe.agp_mutex));
+ return ret_val;
+}
+
+static struct file_operations agp_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = agp_read,
+ .write = agp_write,
+ .ioctl = agp_ioctl,
+ .mmap = agp_mmap,
+ .open = agp_open,
+ .release = agp_release,
+};
+
+static struct miscdevice agp_miscdev =
+{
+ .minor = AGPGART_MINOR,
+ .name = "agpgart",
+ .fops = &agp_fops
+};
+
+int agp_frontend_initialize(void)
+{
+ memset(&agp_fe, 0, sizeof(struct agp_front_data));
+ sema_init(&(agp_fe.agp_mutex), 1);
+
+ if (misc_register(&agp_miscdev)) {
+ printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
+ return -EIO;
+ }
+ return 0;
+}
+
+void agp_frontend_cleanup(void)
+{
+ misc_deregister(&agp_miscdev);
+}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
new file mode 100644
index 00000000000..c321a924e38
--- /dev/null
+++ b/drivers/char/agp/generic.c
@@ -0,0 +1,1222 @@
+/*
+ * AGPGART driver.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2005 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include "agp.h"
+
+__u32 *agp_gatt_table;
+int agp_memory_reserved;
+
+/*
+ * Needed by the Nforce GART driver for the time being. Would be
+ * nice to do this some other way instead of needing this export.
+ */
+EXPORT_SYMBOL_GPL(agp_memory_reserved);
+
+#if defined(CONFIG_X86)
+int map_page_into_agp(struct page *page)
+{
+ int i;
+ i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+ global_flush_tlb();
+ return i;
+}
+EXPORT_SYMBOL_GPL(map_page_into_agp);
+
+int unmap_page_from_agp(struct page *page)
+{
+ int i;
+ i = change_page_attr(page, 1, PAGE_KERNEL);
+ global_flush_tlb();
+ return i;
+}
+EXPORT_SYMBOL_GPL(unmap_page_from_agp);
+#endif
+
+/*
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the brunt of the work.
+ */
+
+void agp_free_key(int key)
+{
+ if (key < 0)
+ return;
+
+ if (key < MAXKEY)
+ clear_bit(key, agp_bridge->key_list);
+}
+EXPORT_SYMBOL(agp_free_key);
+
+
+static int agp_get_key(void)
+{
+ int bit;
+
+ bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
+ if (bit < MAXKEY) {
+ set_bit(bit, agp_bridge->key_list);
+ return bit;
+ }
+ return -1;
+}
+
+
+struct agp_memory *agp_create_memory(int scratch_pages)
+{
+ struct agp_memory *new;
+
+ new = kmalloc(sizeof(struct agp_memory), GFP_KERNEL);
+
+ if (new == NULL)
+ return NULL;
+
+ memset(new, 0, sizeof(struct agp_memory));
+ new->key = agp_get_key();
+
+ if (new->key < 0) {
+ kfree(new);
+ return NULL;
+ }
+ new->memory = vmalloc(PAGE_SIZE * scratch_pages);
+
+ if (new->memory == NULL) {
+ agp_free_key(new->key);
+ kfree(new);
+ return NULL;
+ }
+ new->num_scratch_pages = scratch_pages;
+ return new;
+}
+EXPORT_SYMBOL(agp_create_memory);
+
+/**
+ * agp_free_memory - free memory associated with an agp_memory pointer.
+ *
+ * @curr: agp_memory pointer to be freed.
+ *
+ * It is the only function that can be called when the backend is not owned
+ * by the caller. (So it can free memory on client death.)
+ */
+void agp_free_memory(struct agp_memory *curr)
+{
+ size_t i;
+
+ if (curr == NULL)
+ return;
+
+ if (curr->is_bound == TRUE)
+ agp_unbind_memory(curr);
+
+ if (curr->type != 0) {
+ curr->bridge->driver->free_by_type(curr);
+ return;
+ }
+ if (curr->page_count != 0) {
+ for (i = 0; i < curr->page_count; i++) {
+ curr->bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i]));
+ }
+ }
+ agp_free_key(curr->key);
+ vfree(curr->memory);
+ kfree(curr);
+}
+EXPORT_SYMBOL(agp_free_memory);
+
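+/* Number of physical-address slots that fit in one page of the memory[]
+ * bookkeeping array; used to compute scratch_pages in agp_allocate_memory(). */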
+#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+
+/**
+ * agp_allocate_memory - allocate a group of pages of a certain type.
+ *
+ * @page_count: size_t argument of the number of pages
+ * @type: u32 argument of the type of memory to be allocated.
+ *
+ * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
+ * maps to physical ram. Any other type is device dependent.
+ *
+ * It returns NULL whenever memory is unavailable.
+ */
+struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
+ size_t page_count, u32 type)
+{
+ int scratch_pages;
+ struct agp_memory *new;
+ size_t i;
+
+ if (!bridge)
+ return NULL;
+
+ if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
+ return NULL;
+
+ if (type != 0) {
+ new = bridge->driver->alloc_by_type(page_count, type);
+ if (new)
+ new->bridge = bridge;
+ return new;
+ }
+
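+ /* new->memory[] holds one physical address per allocated page, so each
+ * scratch page of that array covers ENTRIES_PER_PAGE pages. For example,
+ * on a 64-bit kernel with 4K pages ENTRIES_PER_PAGE is 512, and a
+ * 1024-page request needs two scratch pages for the address array. */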
+ scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+ new = agp_create_memory(scratch_pages);
+
+ if (new == NULL)
+ return NULL;
+
+ for (i = 0; i < page_count; i++) {
+ void *addr = bridge->driver->agp_alloc_page(bridge);
+
+ if (addr == NULL) {
+ agp_free_memory(new);
+ return NULL;
+ }
+ new->memory[i] = virt_to_phys(addr);
+ new->page_count++;
+ }
+ new->bridge = bridge;
+
+ flush_agp_mappings();
+
+ return new;
+}
+EXPORT_SYMBOL(agp_allocate_memory);
+
+
+/* End - Generic routines for handling agp_memory structures */
+
+
+static int agp_return_size(void)
+{
+ int current_size;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ switch (agp_bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ current_size = A_SIZE_8(temp)->size;
+ break;
+ case U16_APER_SIZE:
+ current_size = A_SIZE_16(temp)->size;
+ break;
+ case U32_APER_SIZE:
+ current_size = A_SIZE_32(temp)->size;
+ break;
+ case LVL2_APER_SIZE:
+ current_size = A_SIZE_LVL2(temp)->size;
+ break;
+ case FIXED_APER_SIZE:
+ current_size = A_SIZE_FIX(temp)->size;
+ break;
+ default:
+ current_size = 0;
+ break;
+ }
+
+ current_size -= (agp_memory_reserved / (1024*1024));
+ if (current_size < 0)
+ current_size = 0;
+ return current_size;
+}
+
+
+int agp_num_entries(void)
+{
+ int num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ switch (agp_bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ num_entries = A_SIZE_LVL2(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved >> PAGE_SHIFT;
+ if (num_entries < 0)
+ num_entries = 0;
+ return num_entries;
+}
+EXPORT_SYMBOL_GPL(agp_num_entries);
+
+
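+/*
+ * Returns non-zero when the bridge's AGP status register reports AGP 3.0
+ * signalling (the AGPSTAT_MODE_3_0 bit), i.e. the bridge is currently
+ * operating in 3.0 mode rather than 2.x.
+ */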
+static int check_bridge_mode(struct pci_dev *dev)
+{
+ u32 agp3;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
+ pci_read_config_dword(dev, cap_ptr+AGPSTAT, &agp3);
+ if (agp3 & AGPSTAT_MODE_3_0)
+ return 1;
+ return 0;
+}
+
+
+/**
+ * agp_copy_info - copy bridge state information
+ *
+ * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
+ *
+ * This function copies information about the agp bridge device and the state of
+ * the agp backend into an agp_kern_info pointer.
+ */
+int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
+{
+ memset(info, 0, sizeof(struct agp_kern_info));
+ if (!bridge) {
+ info->chipset = NOT_SUPPORTED;
+ return -EIO;
+ }
+
+ info->version.major = bridge->version->major;
+ info->version.minor = bridge->version->minor;
+ info->chipset = SUPPORTED;
+ info->device = bridge->dev;
+ if (check_bridge_mode(bridge->dev))
+ info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
+ else
+ info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
+ info->aper_base = bridge->gart_bus_addr;
+ info->aper_size = agp_return_size();
+ info->max_memory = bridge->max_memory_agp;
+ info->current_memory = atomic_read(&bridge->current_memory_agp);
+ info->cant_use_aperture = bridge->driver->cant_use_aperture;
+ info->vm_ops = bridge->vm_ops;
+ info->page_mask = ~0UL;
+ return 0;
+}
+EXPORT_SYMBOL(agp_copy_info);
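+/*
+ * A minimal sketch of how a client might query the backend state, assuming
+ * it already holds a valid bridge pointer:
+ *
+ *	struct agp_kern_info info;
+ *
+ *	if (agp_copy_info(bridge, &info) == 0)
+ *		printk(KERN_DEBUG PFX "aperture at 0x%lx, %luMB\n",
+ *			(unsigned long) info.aper_base,
+ *			(unsigned long) info.aper_size);
+ */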
+
+/* End - Routine to copy over information structure */
+
+/*
+ * Routines for handling swapping of agp_memory into the GATT -
+ * These routines take agp_memory and insert them into the GATT.
+ * They call device specific routines to actually write to the GATT.
+ */
+
+/**
+ * agp_bind_memory - Bind an agp_memory structure into the GATT.
+ *
+ * @curr: agp_memory pointer
+ * @pg_start: an offset into the graphics aperture translation table
+ *
+ * It returns -EINVAL if the pointer == NULL.
+ * It returns -EBUSY if the area of the table requested is already in use.
+ */
+int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
+{
+ int ret_val;
+
+ if (curr == NULL)
+ return -EINVAL;
+
+ if (curr->is_bound == TRUE) {
+ printk (KERN_INFO PFX "memory %p is already bound!\n", curr);
+ return -EINVAL;
+ }
+ if (curr->is_flushed == FALSE) {
+ curr->bridge->driver->cache_flush();
+ curr->is_flushed = TRUE;
+ }
+ ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
+
+ if (ret_val != 0)
+ return ret_val;
+
+ curr->is_bound = TRUE;
+ curr->pg_start = pg_start;
+ return 0;
+}
+EXPORT_SYMBOL(agp_bind_memory);
+
+
+/**
+ * agp_unbind_memory - Removes an agp_memory structure from the GATT
+ *
+ * @curr: agp_memory pointer to be removed from the GATT.
+ *
+ * It returns -EINVAL if this piece of agp_memory is not currently bound to
+ * the graphics aperture translation table or if the agp_memory pointer == NULL
+ */
+int agp_unbind_memory(struct agp_memory *curr)
+{
+ int ret_val;
+
+ if (curr == NULL)
+ return -EINVAL;
+
+ if (curr->is_bound != TRUE) {
+ printk (KERN_INFO PFX "memory %p was not bound!\n", curr);
+ return -EINVAL;
+ }
+
+ ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
+
+ if (ret_val != 0)
+ return ret_val;
+
+ curr->is_bound = FALSE;
+ curr->pg_start = 0;
+ return 0;
+}
+EXPORT_SYMBOL(agp_unbind_memory);
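+/*
+ * A minimal sketch of the allocate/bind lifecycle as a client such as a DRM
+ * driver might use it, assuming it already owns the backend and holds a
+ * valid bridge pointer (error handling trimmed):
+ *
+ *	struct agp_memory *mem;
+ *
+ *	mem = agp_allocate_memory(bridge, 16, 0);   - 16 pages, type 0 (normal)
+ *	if (mem && agp_bind_memory(mem, pg_start) == 0) {
+ *		... let the device use that aperture range ...
+ *		agp_unbind_memory(mem);
+ *	}
+ *	agp_free_memory(mem);   - unbinds first if still bound
+ */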
+
+/* End - Routines for handling swapping of agp_memory into the GATT */
+
+
+/* Generic Agp routines - Start */
+static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+ u32 tmp;
+
+ if (*requested_mode & AGP2_RESERVED_MASK) {
+ printk (KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode);
+ *requested_mode &= ~AGP2_RESERVED_MASK;
+ }
+
+ /* Check the speed bits make sense. Only one should be set. */
+ tmp = *requested_mode & 7;
+ switch (tmp) {
+ case 0:
+ printk (KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
+ *requested_mode |= AGPSTAT2_1X;
+ break;
+ case 1:
+ case 2:
+ break;
+ case 3:
+ *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */
+ break;
+ case 4:
+ break;
+ case 5:
+ case 6:
+ case 7:
+ *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
+ break;
+ }
+
+ /* disable SBA if it's not supported */
+ if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ /* Set rate */
+ if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
+ *bridge_agpstat &= ~AGPSTAT2_4X;
+
+ if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
+ *bridge_agpstat &= ~AGPSTAT2_2X;
+
+ if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
+ *bridge_agpstat &= ~AGPSTAT2_1X;
+
+ /* Now we know what mode it should be, clear out the unwanted bits. */
+ if (*bridge_agpstat & AGPSTAT2_4X)
+ *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
+
+ if (*bridge_agpstat & AGPSTAT2_2X)
+ *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
+
+ if (*bridge_agpstat & AGPSTAT2_1X)
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
+
+ /* Apply any errata. */
+ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+
+ if (agp_bridge->flags & AGP_ERRATA_SBA)
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ if (agp_bridge->flags & AGP_ERRATA_1X) {
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+ *bridge_agpstat |= AGPSTAT2_1X;
+ }
+
+ /* If we've dropped down to 1X, disable fast writes. */
+ if (*bridge_agpstat & AGPSTAT2_1X)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+}
+
+/*
+ * requested_mode = Mode requested by (typically) X.
+ * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
+ * vga_agpstat = PCI_AGP_STATUS from graphic card.
+ */
+static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+ u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
+ u32 tmp;
+
+ if (*requested_mode & AGP3_RESERVED_MASK) {
+ printk (KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode);
+ *requested_mode &= ~AGP3_RESERVED_MASK;
+ }
+
+ /* Check the speed bits make sense. */
+ tmp = *requested_mode & 7;
+ if (tmp == 0) {
+ printk (KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
+ *requested_mode |= AGPSTAT3_4X;
+ }
+ if (tmp >= 3) {
+ printk (KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
+ *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
+ }
+
+ /* ARQSZ - Set the value to the maximum one.
+ * Don't allow the mode register to override values. */
+ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
+ max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
+
+ /* Calibration cycle.
+ * Don't allow the mode register to override values. */
+ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
+ min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
+
+ /* SBA *must* be supported for AGP v3 */
+ *bridge_agpstat |= AGPSTAT_SBA;
+
+ /*
+ * Set speed.
+ * Check for invalid speeds. This can happen when applications
+ * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
+ */
+ if (*requested_mode & AGPSTAT_MODE_3_0) {
+ /*
+ * The caller hasn't a clue what it is doing. The bridge is in 3.0 mode and
+ * we have been passed a 3.0 mode, but with 2.x speed bits set.
+ * AGP2.x 4x -> AGP3.0 4x.
+ */
+ if (*requested_mode & AGPSTAT2_4X) {
+ printk (KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
+ current->comm, *requested_mode);
+ *requested_mode &= ~AGPSTAT2_4X;
+ *requested_mode |= AGPSTAT3_4X;
+ }
+ } else {
+ /*
+ * The caller doesn't know what they are doing. We are in 3.0 mode,
+ * but have been passed an AGP 2.x mode.
+ * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
+ */
+ printk (KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
+ current->comm, *requested_mode);
+ *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
+ *requested_mode |= AGPSTAT3_4X;
+ }
+
+ if (*requested_mode & AGPSTAT3_8X) {
+ if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ printk ("%s requested AGPx8 but bridge not capable.\n", current->comm);
+ return;
+ }
+ if (!(*vga_agpstat & AGPSTAT3_8X)) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ printk ("%s requested AGPx8 but graphic card not capable.\n", current->comm);
+ return;
+ }
+ /* All set, bridge & device can do AGP x8*/
+ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ goto done;
+
+ } else {
+
+ /*
+ * If we didn't specify AGPx8, we can only do x4.
+ * If the hardware can't do x4, we're up shit creek, and never
+ * should have got this far.
+ */
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ if ((*bridge_agpstat & AGPSTAT3_4X) && (*vga_agpstat & AGPSTAT3_4X))
+ *bridge_agpstat |= AGPSTAT3_4X;
+ else {
+ printk (KERN_INFO PFX "Badness. Don't know which AGP mode to set. "
+ "[bridge_agpstat:%x vga_agpstat:%x fell back to:- bridge_agpstat:%x vga_agpstat:%x]\n",
+ origbridge, origvga, *bridge_agpstat, *vga_agpstat);
+ if (!(*bridge_agpstat & AGPSTAT3_4X))
+ printk (KERN_INFO PFX "Bridge couldn't do AGP x4.\n");
+ if (!(*vga_agpstat & AGPSTAT3_4X))
+ printk (KERN_INFO PFX "Graphic card couldn't do AGP x4.\n");
+ return;
+ }
+ }
+
+done:
+ /* Apply any errata. */
+ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+
+ if (agp_bridge->flags & AGP_ERRATA_SBA)
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ if (agp_bridge->flags & AGP_ERRATA_1X) {
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+ *bridge_agpstat |= AGPSTAT2_1X;
+ }
+}
+
+
+/**
+ * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
+ * @requested_mode: requested agp_stat from userspace (Typically from X)
+ * @bridge_agpstat: current agp_stat from AGP bridge.
+ *
+ * This function will hunt for an AGP graphics card, and try to match
+ * the requested mode to the capabilities of both the bridge and the card.
+ */
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
+{
+ struct pci_dev *device = NULL;
+ u32 vga_agpstat;
+ u8 cap_ptr;
+
+ for (;;) {
+ device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
+ if (!device) {
+ printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
+ return 0;
+ }
+ cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (cap_ptr)
+ break;
+ }
+
+ /*
+ * Ok, here we have an AGP device. Disable impossible
+ * settings, and adjust the readqueue to the minimum.
+ */
+ pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
+
+ /* adjust RQ depth */
+ bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
+ min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
+ min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
+
+ /* disable FW if it's not supported */
+ if (!((bridge_agpstat & AGPSTAT_FW) &&
+ (vga_agpstat & AGPSTAT_FW) &&
+ (requested_mode & AGPSTAT_FW)))
+ bridge_agpstat &= ~AGPSTAT_FW;
+
+ /* Check to see if we are operating in 3.0 mode */
+ if (check_bridge_mode(agp_bridge->dev))
+ agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+ else
+ agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+
+ pci_dev_put(device);
+ return bridge_agpstat;
+}
+EXPORT_SYMBOL(agp_collect_device_status);
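+/*
+ * The typical caller is a chipset driver's agp_enable hook: read the
+ * bridge's PCI_AGP_STATUS, run it through agp_collect_device_status()
+ * together with the requested mode, then write the result back out with
+ * agp_device_command(). agp_generic_enable() below is the reference
+ * example of that sequence.
+ */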
+
+
+void agp_device_command(u32 bridge_agpstat, int agp_v3)
+{
+ struct pci_dev *device = NULL;
+ int mode;
+
+ mode = bridge_agpstat & 0x7;
+ if (agp_v3)
+ mode *= 4;
+
+ for_each_pci_dev(device) {
+ u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+
+ printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
+ agp_v3 ? 3 : 2, pci_name(device), mode);
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
+ }
+}
+EXPORT_SYMBOL(agp_device_command);
+
+
+void get_agp_version(struct agp_bridge_data *bridge)
+{
+ u32 ncapid;
+
+ /* Exit early if already set by errata workarounds. */
+ if (bridge->major_version != 0)
+ return;
+
+ pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
+ bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+ bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
+}
+EXPORT_SYMBOL(get_agp_version);
+
+
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
+{
+ u32 bridge_agpstat, temp;
+
+ get_agp_version(agp_bridge);
+
+ printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
+ agp_bridge->major_version,
+ agp_bridge->minor_version,
+ pci_name(agp_bridge->dev));
+
+ pci_read_config_dword(agp_bridge->dev,
+ agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
+
+ bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
+ if (bridge_agpstat == 0)
+ /* Something bad happened. FIXME: Return error code? */
+ return;
+
+ bridge_agpstat |= AGPSTAT_AGP_ENABLE;
+
+ /* Do AGP version specific frobbing. */
+ if (bridge->major_version >= 3) {
+ if (check_bridge_mode(bridge->dev)) {
+ /* If we have 3.5, we can do the isoch stuff. */
+ if (bridge->minor_version >= 5)
+ agp_3_5_enable(bridge);
+ agp_device_command(bridge_agpstat, TRUE);
+ return;
+ } else {
+ /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
+ bridge_agpstat &= ~(7<<10);
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx+AGPCTRL, &temp);
+ temp |= (1<<9);
+ pci_write_config_dword(bridge->dev,
+ bridge->capndx+AGPCTRL, temp);
+
+ printk (KERN_INFO PFX "Device is in legacy mode,"
+ " falling back to 2.x\n");
+ }
+ }
+
+ /* AGP v<3 */
+ agp_device_command(bridge_agpstat, FALSE);
+}
+EXPORT_SYMBOL(agp_generic_enable);
+
+
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ char *table;
+ char *table_end;
+ int size;
+ int page_order;
+ int num_entries;
+ int i;
+ void *temp;
+ struct page *page;
+
+ /* The generic routines can't handle 2 level gatt's */
+ if (bridge->driver->size_type == LVL2_APER_SIZE)
+ return -EINVAL;
+
+ table = NULL;
+ i = bridge->aperture_size_idx;
+ temp = bridge->current_size;
+ size = page_order = num_entries = 0;
+
+ if (bridge->driver->size_type != FIXED_APER_SIZE) {
+ do {
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ size = A_SIZE_8(temp)->size;
+ page_order =
+ A_SIZE_8(temp)->page_order;
+ num_entries =
+ A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ size = A_SIZE_16(temp)->size;
+ page_order = A_SIZE_16(temp)->page_order;
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ size = A_SIZE_32(temp)->size;
+ page_order = A_SIZE_32(temp)->page_order;
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ /* This case will never really happen. */
+ case FIXED_APER_SIZE:
+ case LVL2_APER_SIZE:
+ default:
+ size = page_order = num_entries = 0;
+ break;
+ }
+
+ table = (char *) __get_free_pages(GFP_KERNEL,
+ page_order);
+
+ if (table == NULL) {
+ i++;
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ bridge->current_size = A_IDX8(bridge);
+ break;
+ case U16_APER_SIZE:
+ bridge->current_size = A_IDX16(bridge);
+ break;
+ case U32_APER_SIZE:
+ bridge->current_size = A_IDX32(bridge);
+ break;
+ /* This case will never really happen. */
+ case FIXED_APER_SIZE:
+ case LVL2_APER_SIZE:
+ default:
+ bridge->current_size =
+ bridge->current_size;
+ break;
+ }
+ temp = bridge->current_size;
+ } else {
+ bridge->aperture_size_idx = i;
+ }
+ } while (!table && (i < bridge->driver->num_aperture_sizes));
+ } else {
+ size = ((struct aper_size_info_fixed *) temp)->size;
+ page_order = ((struct aper_size_info_fixed *) temp)->page_order;
+ num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
+ table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+ }
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ SetPageReserved(page);
+
+ bridge->gatt_table_real = (u32 *) table;
+ agp_gatt_table = (void *)table;
+
+ bridge->driver->cache_flush();
+ bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+ (PAGE_SIZE * (1 << page_order)));
+ bridge->driver->cache_flush();
+
+ if (bridge->gatt_table == NULL) {
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_pages((unsigned long) table, page_order);
+
+ return -ENOMEM;
+ }
+ bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
+
+ /* AK: bogus, should encode addresses > 4GB */
+ for (i = 0; i < num_entries; i++) {
+ writel(bridge->scratch_page, bridge->gatt_table+i);
+ readl(bridge->gatt_table+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_create_gatt_table);
+
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ char *table, *table_end;
+ void *temp;
+ struct page *page;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ page_order = A_SIZE_8(temp)->page_order;
+ break;
+ case U16_APER_SIZE:
+ page_order = A_SIZE_16(temp)->page_order;
+ break;
+ case U32_APER_SIZE:
+ page_order = A_SIZE_32(temp)->page_order;
+ break;
+ case FIXED_APER_SIZE:
+ page_order = A_SIZE_FIX(temp)->page_order;
+ break;
+ case LVL2_APER_SIZE:
+ /* The generic routines can't deal with 2 level gatt's */
+ return -EINVAL;
+ break;
+ default:
+ page_order = 0;
+ break;
+ }
+
+ /* Do not worry about freeing memory, because if this is
+ * called, then all agp memory is deallocated and removed
+ * from the table. */
+
+ iounmap(bridge->gatt_table);
+ table = (char *) bridge->gatt_table_real;
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_pages((unsigned long) bridge->gatt_table_real, page_order);
+
+ agp_gatt_table = NULL;
+ bridge->gatt_table = NULL;
+ bridge->gatt_table_real = NULL;
+ bridge->gatt_bus_addr = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_free_gatt_table);
+
+
+int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int num_entries;
+ size_t i;
+ off_t j;
+ void *temp;
+ struct agp_bridge_data *bridge;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ /* The generic routines can't deal with 2 level gatt's */
+ return -EINVAL;
+ break;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved / PAGE_SIZE;
+ if (num_entries < 0)
+ num_entries = 0;
+
+ if (type != 0 || mem->type != 0) {
+ /* The generic routines know nothing of memory types */
+ return -EINVAL;
+ }
+
+ /* AK: could wrap */
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+
+ while (j < (pg_start + mem->page_count)) {
+ if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
+ return -EBUSY;
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ bridge->driver->cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
+ readl(bridge->gatt_table+j); /* PCI Posting. */
+ }
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_insert_memory);
+
+
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ size_t i;
+ struct agp_bridge_data *bridge;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ if (type != 0 || mem->type != 0) {
+ /* The generic routines know nothing of memory types */
+ return -EINVAL;
+ }
+
+ /* AK: bogus, should encode addresses > 4GB */
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(bridge->scratch_page, bridge->gatt_table+i);
+ readl(bridge->gatt_table+i); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_remove_memory);
+
+
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(agp_generic_alloc_by_type);
+
+
+void agp_generic_free_by_type(struct agp_memory *curr)
+{
+ vfree(curr->memory);
+ agp_free_key(curr->key);
+ kfree(curr);
+}
+EXPORT_SYMBOL(agp_generic_free_by_type);
+
+
+/*
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation and by default they reserve the allocated
+ * memory. They also handle incrementing the current_memory_agp value, which is checked
+ * against a maximum value.
+ */
+
+void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+{
+ struct page * page;
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ return NULL;
+
+ map_page_into_agp(page);
+
+ get_page(page);
+ SetPageLocked(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page_address(page);
+}
+EXPORT_SYMBOL(agp_generic_alloc_page);
+
+
+void agp_generic_destroy_page(void *addr)
+{
+ struct page *page;
+
+ if (addr == NULL)
+ return;
+
+ page = virt_to_page(addr);
+ unmap_page_from_agp(page);
+ put_page(page);
+ unlock_page(page);
+ free_page((unsigned long)addr);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+EXPORT_SYMBOL(agp_generic_destroy_page);
+
+/* End Basic Page Allocation Routines */
+
+
+/**
+ * agp_enable - initialise the agp point-to-point connection.
+ *
+ * @mode: agp mode register value to configure with.
+ */
+void agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ if (!bridge)
+ return;
+ bridge->driver->agp_enable(bridge, mode);
+}
+EXPORT_SYMBOL(agp_enable);
+
+/* When we remove the global variable agp_bridge from all drivers
+ * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
+ */
+
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
+{
+ if (list_empty(&agp_bridges))
+ return NULL;
+
+ return agp_bridge;
+}
+
+static void ipi_handler(void *null)
+{
+ flush_agp_cache();
+}
+
+void global_cache_flush(void)
+{
+ if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
+ panic(PFX "timed out waiting for the other CPUs!\n");
+}
+EXPORT_SYMBOL(global_cache_flush);
+
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+ unsigned long addr, int type)
+{
+ /* memory type is ignored in the generic routine */
+ if (bridge->driver->masks)
+ return addr | bridge->driver->masks[0].mask;
+ else
+ return addr;
+}
+EXPORT_SYMBOL(agp_generic_mask_memory);
+
+/*
+ * These functions are implemented according to the AGPv3 spec,
+ * which covers implementation details that had previously been
+ * left open.
+ */
+
+int agp3_generic_fetch_size(void)
+{
+ u16 temp_size;
+ int i;
+ struct aper_size_info_16 *values;
+
+ pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp_size == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(agp3_generic_fetch_size);
+
+void agp3_generic_tlbflush(struct agp_memory *mem)
+{
+ u32 ctrl;
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
+}
+EXPORT_SYMBOL(agp3_generic_tlbflush);
+
+int agp3_generic_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* set aperture size */
+ pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
+ /* set gart pointer */
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
+ /* enable aperture and GTLB */
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
+ return 0;
+}
+EXPORT_SYMBOL(agp3_generic_configure);
+
+void agp3_generic_cleanup(void)
+{
+ u32 ctrl;
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
+}
+EXPORT_SYMBOL(agp3_generic_cleanup);
+
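+/*
+ * Each entry below is {aperture size in MB, GATT entries, page order of the
+ * GATT allocation, AGPAPSIZE register value}. For example, a 256MB aperture
+ * with 4K pages needs 65536 entries; at 4 bytes each that is a 256KB table,
+ * i.e. an order-6 allocation.
+ */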
+struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
+{
+ {4096, 1048576, 10,0x000},
+ {2048, 524288, 9, 0x800},
+ {1024, 262144, 8, 0xc00},
+ { 512, 131072, 7, 0xe00},
+ { 256, 65536, 6, 0xf00},
+ { 128, 32768, 5, 0xf20},
+ { 64, 16384, 4, 0xf30},
+ { 32, 8192, 3, 0xf38},
+ { 16, 4096, 2, 0xf3c},
+ { 8, 2048, 1, 0xf3e},
+ { 4, 1024, 0, 0xf3f}
+};
+EXPORT_SYMBOL(agp3_generic_sizes);
+
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
new file mode 100644
index 00000000000..6052bfa04c7
--- /dev/null
+++ b/drivers/char/agp/hp-agp.c
@@ -0,0 +1,552 @@
+/*
+ * HP zx1 AGPGART routines.
+ *
+ * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+
+#include <asm/acpi-ext.h>
+
+#include "agp.h"
+
+#ifndef log2
+#define log2(x) ffz(~(x))
+#endif
+
+#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
+
+/* HP ZX1 IOC registers */
+#define HP_ZX1_IBASE 0x300
+#define HP_ZX1_IMASK 0x308
+#define HP_ZX1_PCOM 0x310
+#define HP_ZX1_TCNFG 0x318
+#define HP_ZX1_PDIR_BASE 0x320
+
+#define HP_ZX1_IOVA_BASE GB(1UL)
+#define HP_ZX1_IOVA_SIZE GB(1UL)
+#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
+#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
+
+#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
+#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
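+/*
+ * Example: with io_tlb_shift == 12 (4K IO pages), an IOVA 8K above
+ * hp_private.iova_base maps to PDIR index 2.
+ */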
+
+#define AGP8X_MODE_BIT 3
+#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
+
+/* The AGP bridge need not be a PCI device, but DRM thinks it is. */
+static struct pci_dev fake_bridge_dev;
+
+static int hp_zx1_gart_found;
+
+static struct aper_size_info_fixed hp_zx1_sizes[] =
+{
+ {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
+};
+
+static struct gatt_mask hp_zx1_masks[] =
+{
+ {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
+};
+
+static struct _hp_private {
+ volatile u8 __iomem *ioc_regs;
+ volatile u8 __iomem *lba_regs;
+ int lba_cap_offset;
+ u64 *io_pdir; // PDIR for entire IOVA
+ u64 *gatt; // PDIR just for GART (subset of above)
+ u64 gatt_entries;
+ u64 iova_base;
+ u64 gart_base;
+ u64 gart_size;
+ u64 io_pdir_size;
+ int io_pdir_owner; // do we own it, or share it with sba_iommu?
+ int io_page_size;
+ int io_tlb_shift;
+ int io_tlb_ps; // IOC ps config
+ int io_pages_per_kpage;
+} hp_private;
+
+static int __init hp_zx1_ioc_shared(void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
+
+ /*
+ * IOC already configured by sba_iommu module; just use
+ * its setup. We assume:
+ *	- IOVA space is 1GB in size
+ *	- first 512MB is IOMMU, second 512MB is GART
+ */
+ hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
+ switch (hp->io_tlb_ps) {
+ case 0: hp->io_tlb_shift = 12; break;
+ case 1: hp->io_tlb_shift = 13; break;
+ case 2: hp->io_tlb_shift = 14; break;
+ case 3: hp->io_tlb_shift = 16; break;
+ default:
+ printk(KERN_ERR PFX "Invalid IOTLB page size "
+ "configuration 0x%x\n", hp->io_tlb_ps);
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ return -ENODEV;
+ }
+ hp->io_page_size = 1 << hp->io_tlb_shift;
+ hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+ hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
+ hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
+
+ hp->gart_size = HP_ZX1_GART_SIZE;
+ hp->gatt_entries = hp->gart_size / hp->io_page_size;
+
+ hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+ hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+
+ if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
+ /* Normal case when no AGP device in system */
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
+ "GART disabled\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init
+hp_zx1_ioc_owner (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
+
+ /*
+ * Select an IOV page size no larger than system page size.
+ */
+ if (PAGE_SIZE >= KB(64)) {
+ hp->io_tlb_shift = 16;
+ hp->io_tlb_ps = 3;
+ } else if (PAGE_SIZE >= KB(16)) {
+ hp->io_tlb_shift = 14;
+ hp->io_tlb_ps = 2;
+ } else if (PAGE_SIZE >= KB(8)) {
+ hp->io_tlb_shift = 13;
+ hp->io_tlb_ps = 1;
+ } else {
+ hp->io_tlb_shift = 12;
+ hp->io_tlb_ps = 0;
+ }
+ hp->io_page_size = 1 << hp->io_tlb_shift;
+ hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+ hp->iova_base = HP_ZX1_IOVA_BASE;
+ hp->gart_size = HP_ZX1_GART_SIZE;
+ hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
+
+ hp->gatt_entries = hp->gart_size / hp->io_page_size;
+ hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
+
+ return 0;
+}
+
+static int __init
+hp_zx1_ioc_init (u64 hpa)
+{
+ struct _hp_private *hp = &hp_private;
+
+ hp->ioc_regs = ioremap(hpa, 1024);
+ if (!hp->ioc_regs)
+ return -ENOMEM;
+
+ /*
+ * If the IOTLB is currently disabled, we can take it over.
+ * Otherwise, we have to share with sba_iommu.
+ */
+ hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
+
+ if (hp->io_pdir_owner)
+ return hp_zx1_ioc_owner();
+
+ return hp_zx1_ioc_shared();
+}
+
+static int
+hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
+{
+ u16 status;
+ u8 pos, id;
+ int ttl = 48;
+
+ status = readw(hpa+PCI_STATUS);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+ pos = readb(hpa+PCI_CAPABILITY_LIST);
+ while (ttl-- && pos >= 0x40) {
+ pos &= ~3;
+ id = readb(hpa+pos+PCI_CAP_LIST_ID);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
+ }
+ return 0;
+}
+
+static int __init
+hp_zx1_lba_init (u64 hpa)
+{
+ struct _hp_private *hp = &hp_private;
+ int cap;
+
+ hp->lba_regs = ioremap(hpa, 256);
+ if (!hp->lba_regs)
+ return -ENOMEM;
+
+ hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
+
+ cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
+ if (cap != PCI_CAP_ID_AGP) {
+ printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
+ cap, hp->lba_cap_offset);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+hp_zx1_fetch_size(void)
+{
+ int size;
+
+ size = hp_private.gart_size / MB(1);
+ hp_zx1_sizes[0].size = size;
+ agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
+ return size;
+}
+
+static int
+hp_zx1_configure (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ agp_bridge->gart_bus_addr = hp->gart_base;
+ agp_bridge->capndx = hp->lba_cap_offset;
+ agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
+
+ if (hp->io_pdir_owner) {
+ writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
+ readl(hp->ioc_regs+HP_ZX1_TCNFG);
+ writel(~(HP_ZX1_IOVA_SIZE-1), hp->ioc_regs+HP_ZX1_IMASK);
+ readl(hp->ioc_regs+HP_ZX1_IMASK);
+ writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
+ readl(hp->ioc_regs+HP_ZX1_IBASE);
+ writel(hp->iova_base|log2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
+ readl(hp->ioc_regs+HP_ZX1_PCOM);
+ }
+
+ return 0;
+}
+
+static void
+hp_zx1_cleanup (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ if (hp->ioc_regs) {
+ if (hp->io_pdir_owner) {
+ writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
+ readq(hp->ioc_regs+HP_ZX1_IBASE);
+ }
+ iounmap(hp->ioc_regs);
+ }
+ if (hp->lba_regs)
+ iounmap(hp->lba_regs);
+}
+
+static void
+hp_zx1_tlbflush (struct agp_memory *mem)
+{
+ struct _hp_private *hp = &hp_private;
+
+ writeq(hp->gart_base | log2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
+ readq(hp->ioc_regs+HP_ZX1_PCOM);
+}
+
+static int
+hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
+{
+ struct _hp_private *hp = &hp_private;
+ int i;
+
+ if (hp->io_pdir_owner) {
+ hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
+ get_order(hp->io_pdir_size));
+ if (!hp->io_pdir) {
+ printk(KERN_ERR PFX "Couldn't allocate contiguous "
+ "memory for I/O PDIR\n");
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ return -ENOMEM;
+ }
+ memset(hp->io_pdir, 0, hp->io_pdir_size);
+
+ hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+ }
+
+ for (i = 0; i < hp->gatt_entries; i++) {
+ hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
+ }
+
+ return 0;
+}
+
+static int
+hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
+{
+ struct _hp_private *hp = &hp_private;
+
+ if (hp->io_pdir_owner)
+ free_pages((unsigned long) hp->io_pdir,
+ get_order(hp->io_pdir_size));
+ else
+ hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
+ return 0;
+}
+
+static int
+hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _hp_private *hp = &hp_private;
+ int i, k;
+ off_t j, io_pg_start;
+ int io_pg_count;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ io_pg_start = hp->io_pages_per_kpage * pg_start;
+ io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+ if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
+ return -EINVAL;
+ }
+
+ j = io_pg_start;
+ while (j < (io_pg_start + io_pg_count)) {
+ if (hp->gatt[j]) {
+ return -EBUSY;
+ }
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ global_cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+ unsigned long paddr;
+
+ paddr = mem->memory[i];
+ for (k = 0;
+ k < hp->io_pages_per_kpage;
+ k++, j++, paddr += hp->io_page_size) {
+ hp->gatt[j] =
+ agp_bridge->driver->mask_memory(agp_bridge,
+ paddr, type);
+ }
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int
+hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _hp_private *hp = &hp_private;
+ int i, io_pg_start, io_pg_count;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ io_pg_start = hp->io_pages_per_kpage * pg_start;
+ io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+ for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+ hp->gatt[i] = agp_bridge->scratch_page;
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static unsigned long
+hp_zx1_mask_memory (struct agp_bridge_data *bridge,
+ unsigned long addr, int type)
+{
+ return HP_ZX1_PDIR_VALID_BIT | addr;
+}
+
+static void
+hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
+{
+ struct _hp_private *hp = &hp_private;
+ u32 command;
+
+ command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
+ command = agp_collect_device_status(bridge, mode, command);
+ command |= 0x00000100;
+
+ writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
+
+ agp_device_command(command, (mode & AGP8X_MODE) != 0);
+}
+
+struct agp_bridge_driver hp_zx1_driver = {
+ .owner = THIS_MODULE,
+ .size_type = FIXED_APER_SIZE,
+ .configure = hp_zx1_configure,
+ .fetch_size = hp_zx1_fetch_size,
+ .cleanup = hp_zx1_cleanup,
+ .tlb_flush = hp_zx1_tlbflush,
+ .mask_memory = hp_zx1_mask_memory,
+ .masks = hp_zx1_masks,
+ .agp_enable = hp_zx1_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = hp_zx1_create_gatt_table,
+ .free_gatt_table = hp_zx1_free_gatt_table,
+ .insert_memory = hp_zx1_insert_memory,
+ .remove_memory = hp_zx1_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .cant_use_aperture = 1,
+};
+
+static int __init
+hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
+{
+ struct agp_bridge_data *bridge;
+ int error = 0;
+
+ error = hp_zx1_ioc_init(ioc_hpa);
+ if (error)
+ goto fail;
+
+ error = hp_zx1_lba_init(lba_hpa);
+ if (error)
+ goto fail;
+
+ bridge = agp_alloc_bridge();
+ if (!bridge) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ bridge->driver = &hp_zx1_driver;
+
+ fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
+ fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
+ bridge->dev = &fake_bridge_dev;
+
+ error = agp_add_bridge(bridge);
+ fail:
+ if (error)
+ hp_zx1_cleanup();
+ return error;
+}
+
+static acpi_status __init
+zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
+{
+ acpi_handle handle, parent;
+ acpi_status status;
+ struct acpi_buffer buffer;
+ struct acpi_device_info *info;
+ u64 lba_hpa, sba_hpa, length;
+ int match;
+
+ status = hp_acpi_csr_space(obj, &lba_hpa, &length);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* keep looking for another bridge */
+
+ /* Look for an enclosing IOC scope and find its CSR space */
+ handle = obj;
+ do {
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_get_object_info(handle, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ /* TBD check _CID also */
+ info = buffer.pointer;
+ info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
+ match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
+ ACPI_MEM_FREE(info);
+ if (match) {
+ status = hp_acpi_csr_space(handle, &sba_hpa, &length);
+ if (ACPI_SUCCESS(status))
+ break;
+ else {
+ printk(KERN_ERR PFX "Detected HP ZX1 "
+ "AGP LBA but no IOC.\n");
+ return AE_OK;
+ }
+ }
+ }
+
+ status = acpi_get_parent(handle, &parent);
+ handle = parent;
+ } while (ACPI_SUCCESS(status));
+
+ if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
+ return AE_OK;
+
+ printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset (ioc=%lx, lba=%lx)\n",
+ (char *) context, sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
+
+ hp_zx1_gart_found = 1;
+ return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
+}
+
+static int __init
+agp_hp_init (void)
+{
+ if (agp_off)
+ return -EINVAL;
+
+ acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
+ if (hp_zx1_gart_found)
+ return 0;
+
+ acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
+ if (hp_zx1_gart_found)
+ return 0;
+
+ return -ENODEV;
+}
+
+static void __exit
+agp_hp_cleanup (void)
+{
+}
+
+module_init(agp_hp_init);
+module_exit(agp_hp_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
new file mode 100644
index 00000000000..adbea896c0d
--- /dev/null
+++ b/drivers/char/agp/i460-agp.c
@@ -0,0 +1,642 @@
+/*
+ * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
+ * the "Intel 460GTX Chipset Software Developer's Manual":
+ * http://developer.intel.com/design/itanium/downloads/24870401s.htm
+ */
+/*
+ * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
+ * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+
+#include "agp.h"
+
+#define INTEL_I460_BAPBASE 0x98
+#define INTEL_I460_GXBCTL 0xa0
+#define INTEL_I460_AGPSIZ 0xa2
+#define INTEL_I460_ATTBASE 0xfe200000
+#define INTEL_I460_GATT_VALID (1UL << 24)
+#define INTEL_I460_GATT_COHERENT (1UL << 25)
+
+/*
+ * The i460 can operate with large (4MB) pages, but there is no sane way to support this
+ * within the current kernel/DRM environment, so we disable the relevant code for now.
+ * See also comments in ia64_alloc_page()...
+ */
+#define I460_LARGE_IO_PAGES 0
+
+#if I460_LARGE_IO_PAGES
+# define I460_IO_PAGE_SHIFT i460.io_page_shift
+#else
+# define I460_IO_PAGE_SHIFT 12
+#endif
+
+#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT)
+#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
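+/*
+ * For example, with 4K IO pages (I460_IO_PAGE_SHIFT == 12) and 16K kernel
+ * pages, I460_IOPAGES_PER_KPAGE is 4, so each kernel page occupies four
+ * consecutive GATT entries.
+ */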
+#define I460_SRAM_IO_DISABLE (1 << 4)
+#define I460_BAPBASE_ENABLE (1 << 3)
+#define I460_AGPSIZ_MASK 0x7
+#define I460_4M_PS (1 << 1)
+
+/* Control bits for Out-Of-GART coherency and Burst Write Combining */
+#define I460_GXBCTL_OOG (1UL << 0)
+#define I460_GXBCTL_BWC (1UL << 2)
+
+/*
+ * gatt_table entries are 32 bits wide on the i460; the generic code ought to declare the
+ * gatt_table and gatt_table_real pointers as "void *"...
+ */
+#define RD_GATT(index) readl((u32 *) i460.gatt + (index))
+#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index))
+/*
+ * The 460 spec says we have to read the last location written to make sure that all
+ * writes have taken effect
+ */
+#define WR_FLUSH_GATT(index) RD_GATT(index)
+
+#define log2(x) ffz(~(x))
+
+static struct {
+ void *gatt; /* ioremap'd GATT area */
+
+ /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
+ u8 io_page_shift;
+
+ /* BIOS configures chipset to one of 2 possible apbase values: */
+ u8 dynamic_apbase;
+
+ /* structure for tracking partial use of 4MB GART pages: */
+ struct lp_desc {
+ unsigned long *alloced_map; /* bitmap of kernel-pages in use */
+ int refcount; /* number of kernel pages using the large page */
+ u64 paddr; /* physical address of large page */
+ } *lp_desc;
+} i460;
+
+static struct aper_size_info_8 i460_sizes[3] =
+{
+ /*
+ * The 32GB aperture is only available with a 4M GART page size. Due to the
+ * dynamic GART page size, we can't figure out page_order or num_entries until
+ * runtime.
+ */
+ {32768, 0, 0, 4},
+ {1024, 0, 0, 2},
+ {256, 0, 0, 1}
+};
+
+static struct gatt_mask i460_masks[] =
+{
+ {
+ .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
+ .type = 0
+ }
+};
+
+static int i460_fetch_size (void)
+{
+ int i;
+ u8 temp;
+ struct aper_size_info_8 *values;
+
+ /* Determine the GART page size */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
+ i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
+ pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
+
+ if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
+ printk(KERN_ERR PFX
+ "I/O (GART) page-size %ZuKB doesn't match expected size %ZuKB\n",
+ 1UL << (i460.io_page_shift - 10), 1UL << (I460_IO_PAGE_SHIFT));
+ return 0;
+ }
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
+
+ /* Exit now if the IO drivers for the GART SRAMS are turned off */
+ if (temp & I460_SRAM_IO_DISABLE) {
+ printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
+ printk(KERN_ERR PFX "AGPGART operation not possible\n");
+ return 0;
+ }
+
+ /* Make sure we don't try to create a 2^23 entry GATT */
+ if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
+ printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
+ return 0;
+ }
+
+ /* Determine the proper APBASE register */
+ if (temp & I460_BAPBASE_ENABLE)
+ i460.dynamic_apbase = INTEL_I460_BAPBASE;
+ else
+ i460.dynamic_apbase = AGP_APBASE;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ /*
+ * Dynamically calculate the proper num_entries and page_order values for
+ * the defined aperture sizes. Take care not to shift off the end of
+ * values[i].size.
+ */
+ values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
+ values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
+ }
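+ /* For example, with 4KB GART pages the 1024MB aperture entry above
+ * becomes (1024 << 8) = 262144 entries, i.e. a 1MB GATT. */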
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ /* Neglect control bits when matching up size_value */
+ if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+/* There isn't anything to do here since 460 has no GART TLB. */
+static void i460_tlb_flush (struct agp_memory *mem)
+{
+ return;
+}
+
+/*
+ * This utility function is needed to prevent corruption of the control bits
+ * which are stored along with the aperture size in 460's AGPSIZ register
+ */
+static void i460_write_agpsiz (u8 size_value)
+{
+ u8 temp;
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
+ ((temp & ~I460_AGPSIZ_MASK) | size_value));
+}
+
+static void i460_cleanup (void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ i460_write_agpsiz(previous_size->size_value);
+
+ if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
+ kfree(i460.lp_desc);
+}
+
+static int i460_configure (void)
+{
+ union {
+ u32 small[2];
+ u64 large;
+ } temp;
+ size_t size;
+ u8 scratch;
+ struct aper_size_info_8 *current_size;
+
+ temp.large = 0;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ i460_write_agpsiz(current_size->size_value);
+
+ /*
+ * Do the necessary rigmarole to read all eight bytes of APBASE.
+ * This has to be done since the AGP aperture can be above 4GB on
+ * 460 based systems.
+ */
+ pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
+ pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));
+
+ /* Clear BAR control bits */
+ agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
+ (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
+
+ /*
+ * Initialize partial allocation trackers if a GART page is bigger than a kernel
+ * page.
+ */
+ if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
+ size = current_size->num_entries * sizeof(i460.lp_desc[0]);
+ i460.lp_desc = kmalloc(size, GFP_KERNEL);
+ if (!i460.lp_desc)
+ return -ENOMEM;
+ memset(i460.lp_desc, 0, size);
+ }
+ return 0;
+}
+
+static int i460_create_gatt_table (struct agp_bridge_data *bridge)
+{
+ int page_order, num_entries, i;
+ void *temp;
+
+ /*
+ * Load up the fixed address of the GART SRAMS which hold our GATT table.
+ */
+ temp = agp_bridge->current_size;
+ page_order = A_SIZE_8(temp)->page_order;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
+
+ /* These are no good; they should be removed from the agp_bridge structure... */
+ agp_bridge->gatt_table_real = NULL;
+ agp_bridge->gatt_table = NULL;
+ agp_bridge->gatt_bus_addr = 0;
+
+ for (i = 0; i < num_entries; ++i)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(i - 1);
+ return 0;
+}
+
+static int i460_free_gatt_table (struct agp_bridge_data *bridge)
+{
+ int num_entries, i;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ for (i = 0; i < num_entries; ++i)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(num_entries - 1);
+
+ iounmap(i460.gatt);
+ return 0;
+}
+
+/*
+ * The following functions are called when the I/O (GART) page size is smaller than
+ * PAGE_SIZE.
+ */
+
+static int i460_insert_memory_small_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ unsigned long paddr, io_pg_start, io_page_size;
+ int i, j, k, num_entries;
+ void *temp;
+
+ pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
+ mem, pg_start, type, mem->memory[0]);
+
+ io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
+ printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+ return -EINVAL;
+ }
+
+ j = io_pg_start;
+ while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
+ if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
+ pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
+ j, RD_GATT(j));
+ return -EBUSY;
+ }
+ j++;
+ }
+
+ io_page_size = 1UL << I460_IO_PAGE_SHIFT;
+ for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+ paddr = mem->memory[i];
+ for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
+ WR_GATT(j, agp_bridge->driver->mask_memory(agp_bridge,
+ paddr, mem->type));
+ }
+ WR_FLUSH_GATT(j - 1);
+ return 0;
+}
+
+static int i460_remove_memory_small_io_page(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i;
+
+ pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
+ mem, pg_start, type);
+
+ pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
+
+ for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(i - 1);
+ return 0;
+}
+
+#if I460_LARGE_IO_PAGES
+
+/*
+ * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
+ *
+ * This situation is interesting since AGP memory allocations that are smaller than a
+ * single GART page are possible. The i460.lp_desc array tracks partial allocation of the
+ * large GART pages to work around this issue.
+ *
+ * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
+ * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
+ * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
+ */
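+/*
+ * For example, with 4MB GART pages and 16KB kernel pages (one possible ia64
+ * configuration), I460_KPAGES_PER_IOPAGE is 256, so a bind starting at
+ * pg_start == 300 lands in lp_desc[1] at kernel-page offset 44 within that
+ * large page.
+ */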
+
+static int i460_alloc_large_page (struct lp_desc *lp)
+{
+ unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
+ size_t map_size;
+ void *lpage;
+
+ lpage = (void *) __get_free_pages(GFP_KERNEL, order);
+ if (!lpage) {
+ printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
+ return -ENOMEM;
+ }
+
+ map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
+ lp->alloced_map = kmalloc(map_size, GFP_KERNEL);
+ if (!lp->alloced_map) {
+ free_pages((unsigned long) lpage, order);
+ printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
+ return -ENOMEM;
+ }
+ memset(lp->alloced_map, 0, map_size);
+
+ lp->paddr = virt_to_phys(lpage);
+ lp->refcount = 0;
+ atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+ return 0;
+}
+
+static void i460_free_large_page (struct lp_desc *lp)
+{
+ kfree(lp->alloced_map);
+ lp->alloced_map = NULL;
+
+ free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+ atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+}
+
+static int i460_insert_memory_large_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, start_offset, end_offset, idx, pg, num_entries;
+ struct lp_desc *start, *end, *lp;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ /* Figure out what pg_start means in terms of our large GART pages */
+ start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+ end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+ start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+ end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+
+ if (end > i460.lp_desc + num_entries) {
+ printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+ return -EINVAL;
+ }
+
+ /* Check if the requested region of the aperture is free */
+ for (lp = start; lp <= end; ++lp) {
+ if (!lp->alloced_map)
+ continue; /* OK, the entire large page is available... */
+
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++)
+ {
+ if (test_bit(idx, lp->alloced_map))
+ return -EBUSY;
+ }
+ }
+
+ for (lp = start, i = 0; lp <= end; ++lp) {
+ if (!lp->alloced_map) {
+ /* Allocate new GART pages... */
+ if (i460_alloc_large_page(lp) < 0)
+ return -ENOMEM;
+ pg = lp - i460.lp_desc;
+ WR_GATT(pg, agp_bridge->driver->mask_memory(agp_bridge,
+ lp->paddr, 0));
+ WR_FLUSH_GATT(pg);
+ }
+
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++, i++)
+ {
+ mem->memory[i] = lp->paddr + idx*PAGE_SIZE;
+ __set_bit(idx, lp->alloced_map);
+ ++lp->refcount;
+ }
+ }
+ return 0;
+}
+
+static int i460_remove_memory_large_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, pg, start_offset, end_offset, idx, num_entries;
+ struct lp_desc *start, *end, *lp;
+ void *temp;
+
+ temp = agp_bridge->driver->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ /* Figure out what pg_start means in terms of our large GART pages */
+ start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+ end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+ start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+ end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+
+ for (i = 0, lp = start; lp <= end; ++lp) {
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++, i++)
+ {
+ mem->memory[i] = 0;
+ __clear_bit(idx, lp->alloced_map);
+ --lp->refcount;
+ }
+
+ /* Free GART pages if they are unused */
+ if (lp->refcount == 0) {
+ pg = lp - i460.lp_desc;
+ WR_GATT(pg, 0);
+ WR_FLUSH_GATT(pg);
+ i460_free_large_page(lp);
+ }
+ }
+ return 0;
+}
+
+/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */
+
+static int i460_insert_memory (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ return i460_insert_memory_small_io_page(mem, pg_start, type);
+ else
+ return i460_insert_memory_large_io_page(mem, pg_start, type);
+}
+
+static int i460_remove_memory (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ return i460_remove_memory_small_io_page(mem, pg_start, type);
+ else
+ return i460_remove_memory_large_io_page(mem, pg_start, type);
+}
+
+/*
+ * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
+ * allocate memory until we know where it is to be bound in the aperture (a
+ * multi-kernel-page alloc might fit inside of an already allocated GART page).
+ *
+ * Let's just hope nobody counts on the allocated AGP memory being there before bind time
+ * (I don't think current drivers do)...
+ */
+static void *i460_alloc_page (struct agp_bridge_data *bridge)
+{
+ void *page;
+
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ page = agp_generic_alloc_page(agp_bridge);
+ else
+ /* Returning NULL would cause problems */
+ /* AK: really dubious code. */
+ page = (void *)~0UL;
+ return page;
+}
+
+static void i460_destroy_page (void *page)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ agp_generic_destroy_page(page);
+}
+
+#endif /* I460_LARGE_IO_PAGES */
+
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+ unsigned long addr, int type)
+{
+ /* Make sure the returned address is a valid GATT entry */
+ return bridge->driver->masks[0].mask
+ | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12);
+}
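+
+/*
+ * Example (illustrative numbers, not a requirement of the hardware): with
+ * 4KB I/O pages (I460_IO_PAGE_SHIFT == 12) and addr == 0x345678000, the
+ * expression above reduces to 0x345678, i.e. the physical frame number in
+ * the low bits, ORed with the valid/attribute bits from i460_masks[0].
+ */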
+
+struct agp_bridge_driver intel_i460_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = i460_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 3,
+ .configure = i460_configure,
+ .fetch_size = i460_fetch_size,
+ .cleanup = i460_cleanup,
+ .tlb_flush = i460_tlb_flush,
+ .mask_memory = i460_mask_memory,
+ .masks = i460_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = i460_create_gatt_table,
+ .free_gatt_table = i460_free_gatt_table,
+#if I460_LARGE_IO_PAGES
+ .insert_memory = i460_insert_memory,
+ .remove_memory = i460_remove_memory,
+ .agp_alloc_page = i460_alloc_page,
+ .agp_destroy_page = i460_destroy_page,
+#else
+ .insert_memory = i460_insert_memory_small_io_page,
+ .remove_memory = i460_remove_memory_small_io_page,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+#endif
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .cant_use_aperture = 1,
+};
+
+static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &intel_i460_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_intel_i460_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_intel_i460_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_84460GX,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
+
+static struct pci_driver agp_intel_i460_pci_driver = {
+ .name = "agpgart-intel-i460",
+ .id_table = agp_intel_i460_pci_table,
+ .probe = agp_intel_i460_probe,
+ .remove = __devexit_p(agp_intel_i460_remove),
+};
+
+static int __init agp_intel_i460_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_intel_i460_pci_driver);
+}
+
+static void __exit agp_intel_i460_cleanup(void)
+{
+ pci_unregister_driver(&agp_intel_i460_pci_driver);
+}
+
+module_init(agp_intel_i460_init);
+module_exit(agp_intel_i460_cleanup);
+
+MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
new file mode 100644
index 00000000000..8c7d727432b
--- /dev/null
+++ b/drivers/char/agp/intel-agp.c
@@ -0,0 +1,1833 @@
+/*
+ * Intel AGPGART routines.
+ */
+
+/*
+ * Intel(R) 855GM/852GM and 865G support added by David Dawes
+ * <dawes@tungstengraphics.com>.
+ *
+ * Intel(R) 915G/915GM support added by Alan Hourihane
+ * <alanh@tungstengraphics.com>.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+/* Intel 815 register */
+#define INTEL_815_APCONT 0x51
+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR 0x51
+#define INTEL_I820_ERRSTS 0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG 0x50
+#define INTEL_I840_ERRSTS 0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG 0x50
+#define INTEL_I850_ERRSTS 0xc8
+
+/* intel 915G registers */
+#define I915_GMADDR 0x18
+#define I915_MMADDR 0x10
+#define I915_PTEADDR 0x1C
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE 0x74
+#define INTEL_I7505_NCAPID 0x60
+#define INTEL_I7505_NISTAT 0x6c
+#define INTEL_I7505_ATTBASE 0x78
+#define INTEL_I7505_ERRSTS 0x42
+#define INTEL_I7505_AGPCTRL 0x70
+#define INTEL_I7505_MCHCFG 0x50
+
+static struct aper_size_info_fixed intel_i810_sizes[] =
+{
+ {64, 16384, 4},
+ /* The 32M mode still requires a 64k gatt */
+ {32, 8192, 4}
+};
+
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+
+static struct gatt_mask intel_i810_masks[] =
+{
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
+ {.mask = I810_PTE_VALID, .type = 0}
+};
+
+static struct _intel_i810_private {
+ struct pci_dev *i810_dev; /* device one */
+ volatile u8 __iomem *registers;
+ int num_dcache_entries;
+} intel_i810_private;
+
+static int intel_i810_fetch_size(void)
+{
+ u32 smram_miscc;
+ struct aper_size_info_fixed *values;
+
+ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+ printk(KERN_WARNING PFX "i810 is disabled\n");
+ return 0;
+ }
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ } else {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values);
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ return 0;
+}
+
+static int intel_i810_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
+ temp &= 0xfff80000;
+
+ intel_i810_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_i810_private.registers) {
+ printk(KERN_ERR PFX "Unable to remap memory.\n");
+ return -ENOMEM;
+ }
+
+ if ((readl(intel_i810_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ /* This will need to be dynamically assigned */
+ printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
+ intel_i810_private.num_dcache_entries = 1024;
+ }
+ pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_i810_private.registers+I810_PGETBL_CTL);
+ readl(intel_i810_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = 0; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_i810_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i810_private.registers+I810_PTE_BASE+(i*4)); /* PCI posting. */
+ }
+ }
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i810_cleanup(void)
+{
+ writel(0, intel_i810_private.registers+I810_PGETBL_CTL);
+ readl(intel_i810_private.registers); /* PCI Posting. */
+ iounmap(intel_i810_private.registers);
+}
+
+static void intel_i810_tlbflush(struct agp_memory *mem)
+{
+ return;
+}
+
+static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ return;
+}
+
+/* Exists to support ARGB cursors */
+static void *i8xx_alloc_pages(void)
+{
+ struct page * page;
+
+ page = alloc_pages(GFP_KERNEL, 2);
+ if (page == NULL)
+ return NULL;
+
+ if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
+ global_flush_tlb();
+ __free_pages(page, 2); /* matches the order-2 alloc_pages() above */
+ return NULL;
+ }
+ global_flush_tlb();
+ get_page(page);
+ SetPageLocked(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page_address(page);
+}
+
+static void i8xx_destroy_pages(void *addr)
+{
+ struct page *page;
+
+ if (addr == NULL)
+ return;
+
+ page = virt_to_page(addr);
+ change_page_attr(page, 4, PAGE_KERNEL);
+ global_flush_tlb();
+ put_page(page);
+ unlock_page(page);
+ free_pages((unsigned long)addr, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+
+static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries) {
+ return -EINVAL;
+ }
+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
+ return -EBUSY;
+ }
+
+ if (type != 0 || mem->type != 0) {
+ if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) {
+ /* special insert */
+ global_cache_flush();
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, intel_i810_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i810_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
+ }
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+ }
+ if((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY))
+ goto insert;
+ return -EINVAL;
+ }
+
+insert:
+ global_cache_flush();
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ mem->memory[i], mem->type),
+ intel_i810_private.registers+I810_PTE_BASE+(j*4));
+ readl(intel_i810_private.registers+I810_PTE_BASE+(j*4)); /* PCI Posting. */
+ }
+ global_cache_flush();
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_i810_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i810_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However, the X server still writes to it through the AGP aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+ void *addr;
+
+ if (pg_count != 1 && pg_count != 4)
+ return NULL;
+
+ switch (pg_count) {
+ case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
+ break;
+ case 4:
+ /* kludge to get 4 physical pages for ARGB cursor */
+ addr = i8xx_alloc_pages();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (addr == NULL)
+ return NULL;
+
+ new = agp_create_memory(pg_count);
+ if (new == NULL)
+ return NULL;
+
+ new->memory[0] = virt_to_phys(addr);
+ if (pg_count == 4) {
+ /* kludge to get 4 physical pages for ARGB cursor */
+ new->memory[1] = new->memory[0] + PAGE_SIZE;
+ new->memory[2] = new->memory[1] + PAGE_SIZE;
+ new->memory[3] = new->memory[2] + PAGE_SIZE;
+ }
+ new->page_count = pg_count;
+ new->num_scratch_pages = pg_count;
+ new->type = AGP_PHYS_MEMORY;
+ new->physical = new->memory[0];
+ return new;
+}
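+
+/*
+ * Rough usage sketch (not part of this driver; the call site is assumed):
+ * a client such as a DRM driver would typically obtain one of these
+ * allocations with something along the lines of
+ *
+ *	mem = agp_allocate_memory(bridge, 1, AGP_PHYS_MEMORY);
+ *
+ * program mem->physical into the cursor base register, and still bind mem
+ * into the aperture so the X server can write the cursor image through it.
+ * Only AGP_PHYS_MEMORY and the ->physical field come from the code above.
+ */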
+
+static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY) {
+ if (pg_count != intel_i810_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ vfree(new->memory);
+ return new;
+ }
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+
+ return NULL;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+ agp_free_key(curr->key);
+ if(curr->type == AGP_PHYS_MEMORY) {
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
+ else
+ agp_bridge->driver->agp_destroy_page(
+ phys_to_virt(curr->memory[0]));
+ vfree(curr->memory);
+ }
+ kfree(curr);
+}
+
+static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+ unsigned long addr, int type)
+{
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
+
+static struct aper_size_info_fixed intel_i830_sizes[] =
+{
+ {128, 32768, 5},
+ /* The 64M mode still requires a 128k gatt */
+ {64, 16384, 5},
+ {256, 65536, 6},
+};
+
+static struct _intel_i830_private {
+ struct pci_dev *i830_dev; /* device one */
+ volatile u8 __iomem *registers;
+ volatile u32 __iomem *gtt; /* I915G */
+ int gtt_entries;
+} intel_i830_private;
+
+static void intel_i830_init_gtt_entries(void)
+{
+ u16 gmch_ctrl;
+ int gtt_entries;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+ int size;
+
+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+
+ /* We obtain the size of the GTT, which is also stored (for some
+ * reason) at the top of stolen memory. Then we add 4KB to that
+ * for the video BIOS popup, which is also stored in there. */
+ size = agp_bridge->driver->fetch_size() + 4;
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ gtt_entries = KB(512) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = readb(intel_i830_private.registers+I830_RDRAM_CHANNEL_TYPE);
+ gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ gtt_entries = MB(4) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ gtt_entries = MB(16) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ /* Check it's really I915G */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
+ gtt_entries = MB(48) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+ /* Check it's really I915G */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
+ gtt_entries = MB(64) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ }
+ if (gtt_entries > 0)
+ printk(KERN_INFO PFX "Detected %dK %s memory.\n",
+ gtt_entries / KB(1), local ? "local" : "stolen");
+ else
+ printk(KERN_INFO PFX
+ "No pre-allocated video memory detected.\n");
+ gtt_entries /= KB(4);
+
+ intel_i830_private.gtt_entries = gtt_entries;
+}
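+
+/*
+ * Worked example (assuming the usual byte-valued KB()/MB() helpers from
+ * agp.h): a 128MB aperture makes fetch_size() return 128, so size == 132.
+ * Conveniently, a 128MB aperture needs a 128KB GTT (one 4-byte PTE per
+ * 4KB page), plus the 4KB BIOS popup.  With the 1MB stolen-memory setting
+ * this gives MB(1) - KB(132) == 913408 bytes, reported as 892K above, and
+ * 913408 / KB(4) == 223 usable GTT entries.
+ */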
+
+/* The intel i830 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for it in the GTT.
+ */
+static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp);
+ temp &= 0xfff80000;
+
+ intel_i830_private.registers = ioremap(temp,128 * 4096);
+ if (!intel_i830_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ?? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+/* Return the gatt table to a sane state. Use the top of stolen
+ * memory for the GTT.
+ */
+static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ return 0;
+}
+
+static int intel_i830_fetch_size(void)
+{
+ u16 gmch_ctrl;
+ struct aper_size_info_fixed *values;
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+ /* 855GM/852GM/865G has 128MB aperture size */
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ } else {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ }
+
+ return 0;
+}
+
+static int intel_i830_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_i830_private.registers+I810_PGETBL_CTL);
+ readl(intel_i830_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i830_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
+ }
+ }
+
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i830_cleanup(void)
+{
+ iounmap(intel_i830_private.registers);
+}
+
+static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int type)
+{
+ int i,j,num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_i830_private.gtt_entries) {
+ printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
+ pg_start,intel_i830_private.gtt_entries);
+
+ printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ /* The i830 can't check the GTT for entries since it is read-only;
+ * depend on the caller to make the correct offset decisions.
+ */
+
+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
+ return -EINVAL;
+
+ global_cache_flush(); /* FIXME: Necessary ?*/
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ mem->memory[i], mem->type),
+ intel_i830_private.registers+I810_PTE_BASE+(j*4));
+ readl(intel_i830_private.registers+I810_PTE_BASE+(j*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
+ int type)
+{
+ int i;
+
+ global_cache_flush();
+
+ if (pg_start < intel_i830_private.gtt_entries) {
+ printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
+ readl(intel_i830_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
+{
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+
+ /* always return NULL for other allocation types for now */
+ return NULL;
+}
+
+static int intel_i915_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
+
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_i830_private.registers+I810_PGETBL_CTL);
+ readl(intel_i830_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_i830_private.gtt+i);
+ readl(intel_i830_private.gtt+i); /* PCI Posting. */
+ }
+ }
+
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i915_cleanup(void)
+{
+ iounmap(intel_i830_private.gtt);
+ iounmap(intel_i830_private.registers);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
+ int type)
+{
+ int i,j,num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_i830_private.gtt_entries) {
+ printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
+ pg_start,intel_i830_private.gtt_entries);
+
+ printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ /* The i830 can't check the GTT for entries since it is read-only;
+ * depend on the caller to make the correct offset decisions.
+ */
+
+ if ((type != 0 && type != AGP_PHYS_MEMORY) ||
+ (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
+ return -EINVAL;
+
+ global_cache_flush();
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ mem->memory[i], mem->type), intel_i830_private.gtt+j);
+ readl(intel_i830_private.gtt+j); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
+ int type)
+{
+ int i;
+
+ global_cache_flush();
+
+ if (pg_start < intel_i830_private.gtt_entries) {
+ printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_i830_private.gtt+i);
+ readl(intel_i830_private.gtt+i);
+ }
+
+ global_cache_flush();
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int intel_i915_fetch_size(void)
+{
+ struct aper_size_info_fixed *values;
+ u32 temp, offset = 0;
+
+#define I915_256MB_ADDRESS_MASK (1<<27)
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
+ if (temp & I915_256MB_ADDRESS_MASK)
+ offset = 0; /* 128MB aperture */
+ else
+ offset = 2; /* 256MB aperture */
+ agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
+ return values[offset].size;
+}
+
+/* The intel i915 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for it in the GTT.
+ */
+static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp, temp2;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_PTEADDR,&temp2);
+
+ intel_i830_private.gtt = ioremap(temp2, 256 * 1024);
+ if (!intel_i830_private.gtt)
+ return -ENOMEM;
+
+ temp &= 0xfff80000;
+
+ intel_i830_private.registers = ioremap(temp,128 * 4096);
+ if (!intel_i830_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+static int intel_fetch_size(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_16 *values;
+
+ pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int __intel_8xx_fetch_size(u8 temp)
+{
+ int i;
+ struct aper_size_info_8 *values;
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+static int intel_8xx_fetch_size(void)
+{
+ u8 temp;
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+ return __intel_8xx_fetch_size(temp);
+}
+
+static int intel_815_fetch_size(void)
+{
+ u8 temp;
+
+ /* Intel 815 chipsets have a _weird_ APSIZE register with only
+ * one non-reserved bit, so mask the others out ... */
+ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+ temp &= (1 << 3);
+
+ return __intel_8xx_fetch_size(temp);
+}
+
+static void intel_tlbflush(struct agp_memory *mem)
+{
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+
+static void intel_8xx_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+ pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7));
+ pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7));
+}
+
+
+static void intel_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_16 *previous_size;
+
+ previous_size = A_SIZE_16(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static void intel_8xx_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static int intel_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+ /* paccfg/nbxcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+ (temp2 & ~(1 << 10)) | (1 << 9));
+ /* clear any possible error conditions */
+ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+ return 0;
+}
+
+static int intel_815_configure(void)
+{
+ u32 temp, addr;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ /* attbase - aperture base */
+ /* the Intel 815 chipset spec says that bits 29-31 in the
+ * ATTBASE register are reserved, so try not to write them */
+ if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
+ printk (KERN_EMERG PFX "gatt bus addr too high");
+ return -EINVAL;
+ }
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
+ addr &= INTEL_815_ATTBASE_MASK;
+ addr |= agp_bridge->gatt_bus_addr;
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* apcont */
+ pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1));
+
+ /* clear any possible error conditions */
+ /* Oddness: this chipset seems to have no ERRSTS register! */
+ return 0;
+}
+
+static void intel_820_tlbflush(struct agp_memory *mem)
+{
+ return;
+}
+
+static void intel_820_cleanup(void)
+{
+ u8 temp;
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR,
+ temp & ~(1 << 1));
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+ previous_size->size_value);
+}
+
+
+static int intel_820_configure(void)
+{
+ u32 temp;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* global enable aperture access */
+ /* This flag is not accessed through MCHCFG register as in */
+ /* i850 chipset. */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_840_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000);
+ return 0;
+}
+
+static int intel_845_configure(void)
+{
+ u32 temp;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* agpm */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
+ /* clear any possible error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_850_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_860_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700);
+ return 0;
+}
+
+static int intel_830mp_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* gmch */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c);
+ return 0;
+}
+
+static int intel_7505_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mchcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9));
+
+ return 0;
+}
+
+/* Setup function */
+static struct gatt_mask intel_generic_masks[] =
+{
+ {.mask = 0x00000017, .type = 0}
+};
+
+static struct aper_size_info_8 intel_815_sizes[2] =
+{
+ {64, 16384, 4, 0},
+ {32, 8192, 3, 8},
+};
+
+static struct aper_size_info_8 intel_8xx_sizes[7] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56},
+ {16, 4096, 2, 60},
+ {8, 2048, 1, 62},
+ {4, 1024, 0, 63}
+};
+
+static struct aper_size_info_16 intel_generic_sizes[7] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56},
+ {16, 4096, 2, 60},
+ {8, 2048, 1, 62},
+ {4, 1024, 0, 63}
+};
+
+static struct aper_size_info_8 intel_830mp_sizes[4] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56}
+};
+
+static struct agp_bridge_driver intel_generic_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_generic_sizes,
+ .size_type = U16_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_configure,
+ .fetch_size = intel_fetch_size,
+ .cleanup = intel_cleanup,
+ .tlb_flush = intel_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_810_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i810_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .needs_scratch_page = TRUE,
+ .configure = intel_i810_configure,
+ .fetch_size = intel_i810_fetch_size,
+ .cleanup = intel_i810_cleanup,
+ .tlb_flush = intel_i810_tlbflush,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = intel_i810_insert_entries,
+ .remove_memory = intel_i810_remove_entries,
+ .alloc_by_type = intel_i810_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_815_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_815_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .configure = intel_815_configure,
+ .fetch_size = intel_815_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_830_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 3,
+ .needs_scratch_page = TRUE,
+ .configure = intel_i830_configure,
+ .fetch_size = intel_i830_fetch_size,
+ .cleanup = intel_i830_cleanup,
+ .tlb_flush = intel_i810_tlbflush,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i830_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i830_insert_entries,
+ .remove_memory = intel_i830_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_820_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_820_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_820_cleanup,
+ .tlb_flush = intel_820_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_830mp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_830mp_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .configure = intel_830mp_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_840_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_840_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_845_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_845_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_850_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_850_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_860_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_860_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_bridge_driver intel_915_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 3,
+ .needs_scratch_page = TRUE,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i915_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .tlb_flush = intel_i810_tlbflush,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i915_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+
+static struct agp_bridge_driver intel_7505_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = intel_7505_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static int find_i810(u16 device)
+{
+ struct pci_dev *i810_dev;
+
+ i810_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ if (!i810_dev)
+ return 0;
+ intel_i810_private.i810_dev = i810_dev;
+ return 1;
+}
+
+static int find_i830(u16 device)
+{
+ struct pci_dev *i830_dev;
+
+ i830_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
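+ /* Prefer the function-0 instance of the integrated graphics device;
+ * if the first match is some other function, try the next match. */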
+ if (i830_dev && PCI_FUNC(i830_dev->devfn) != 0) {
+ i830_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ device, i830_dev);
+ }
+
+ if (!i830_dev)
+ return 0;
+
+ intel_i830_private.i830_dev = i830_dev;
+ return 1;
+}
+
+static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ char *name = "(unknown)";
+ u8 cap_ptr = 0;
+ struct resource *r;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_82443LX_0:
+ bridge->driver = &intel_generic_driver;
+ name = "440LX";
+ break;
+ case PCI_DEVICE_ID_INTEL_82443BX_0:
+ bridge->driver = &intel_generic_driver;
+ name = "440BX";
+ break;
+ case PCI_DEVICE_ID_INTEL_82443GX_0:
+ bridge->driver = &intel_generic_driver;
+ name = "440GX";
+ break;
+ case PCI_DEVICE_ID_INTEL_82810_MC1:
+ name = "i810";
+ if (!find_i810(PCI_DEVICE_ID_INTEL_82810_IG1))
+ goto fail;
+ bridge->driver = &intel_810_driver;
+ break;
+ case PCI_DEVICE_ID_INTEL_82810_MC3:
+ name = "i810 DC100";
+ if (!find_i810(PCI_DEVICE_ID_INTEL_82810_IG3))
+ goto fail;
+ bridge->driver = &intel_810_driver;
+ break;
+ case PCI_DEVICE_ID_INTEL_82810E_MC:
+ name = "i810 E";
+ if (!find_i810(PCI_DEVICE_ID_INTEL_82810E_IG))
+ goto fail;
+ bridge->driver = &intel_810_driver;
+ break;
+ case PCI_DEVICE_ID_INTEL_82815_MC:
+ /*
+ * The i815 can operate either as an i810 style
+ * integrated device, or as an AGP4X motherboard.
+ */
+ if (find_i810(PCI_DEVICE_ID_INTEL_82815_CGC))
+ bridge->driver = &intel_810_driver;
+ else
+ bridge->driver = &intel_815_driver;
+ name = "i815";
+ break;
+ case PCI_DEVICE_ID_INTEL_82820_HB:
+ case PCI_DEVICE_ID_INTEL_82820_UP_HB:
+ bridge->driver = &intel_820_driver;
+ name = "i820";
+ break;
+ case PCI_DEVICE_ID_INTEL_82830_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82830_CGC)) {
+ bridge->driver = &intel_830_driver;
+ } else {
+ bridge->driver = &intel_830mp_driver;
+ }
+ name = "830M";
+ break;
+ case PCI_DEVICE_ID_INTEL_82840_HB:
+ bridge->driver = &intel_840_driver;
+ name = "i840";
+ break;
+ case PCI_DEVICE_ID_INTEL_82845_HB:
+ bridge->driver = &intel_845_driver;
+ name = "i845";
+ break;
+ case PCI_DEVICE_ID_INTEL_82845G_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82845G_IG)) {
+ bridge->driver = &intel_830_driver;
+ } else {
+ bridge->driver = &intel_845_driver;
+ }
+ name = "845G";
+ break;
+ case PCI_DEVICE_ID_INTEL_82850_HB:
+ bridge->driver = &intel_850_driver;
+ name = "i850";
+ break;
+ case PCI_DEVICE_ID_INTEL_82855PM_HB:
+ bridge->driver = &intel_845_driver;
+ name = "855PM";
+ break;
+ case PCI_DEVICE_ID_INTEL_82855GM_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82855GM_IG)) {
+ bridge->driver = &intel_830_driver;
+ name = "855";
+ } else {
+ bridge->driver = &intel_845_driver;
+ name = "855GM";
+ }
+ break;
+ case PCI_DEVICE_ID_INTEL_82860_HB:
+ bridge->driver = &intel_860_driver;
+ name = "i860";
+ break;
+ case PCI_DEVICE_ID_INTEL_82865_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82865_IG)) {
+ bridge->driver = &intel_830_driver;
+ } else {
+ bridge->driver = &intel_845_driver;
+ }
+ name = "865";
+ break;
+ case PCI_DEVICE_ID_INTEL_82875_HB:
+ bridge->driver = &intel_845_driver;
+ name = "i875";
+ break;
+ case PCI_DEVICE_ID_INTEL_82915G_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82915G_IG)) {
+ bridge->driver = &intel_915_driver;
+ } else {
+ bridge->driver = &intel_845_driver;
+ }
+ name = "915G";
+ break;
+ case PCI_DEVICE_ID_INTEL_82915GM_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82915GM_IG)) {
+ bridge->driver = &intel_915_driver;
+ } else {
+ bridge->driver = &intel_845_driver;
+ }
+ name = "915GM";
+ break;
+ case PCI_DEVICE_ID_INTEL_7505_0:
+ bridge->driver = &intel_7505_driver;
+ name = "E7505";
+ break;
+ case PCI_DEVICE_ID_INTEL_7205_0:
+ bridge->driver = &intel_7505_driver;
+ name = "E7205";
+ break;
+ default:
+ if (cap_ptr)
+ printk(KERN_WARNING PFX "Unsupported Intel chipset (device id: %04x)\n",
+ pdev->device);
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ if (bridge->driver == &intel_810_driver)
+ bridge->dev_private_data = &intel_i810_private;
+ else if (bridge->driver == &intel_830_driver)
+ bridge->dev_private_data = &intel_i830_private;
+
+ printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n", name);
+
+ /*
+ * The following fixes the case where the BIOS has "forgotten" to
+ * provide an address range for the GART.
+ * 20030610 - hamish@zot.org
+ */
+ r = &pdev->resource[0];
+ if (!r->start && r->end) {
+ if(pci_assign_resource(pdev, 0)) {
+ printk(KERN_ERR PFX "could not assign resource 0\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * If the device has not been properly setup, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR PFX "Unable to Enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ /* Fill in the mode register */
+ if (cap_ptr) {
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+ }
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+
+fail:
+ printk(KERN_ERR PFX "Detected an Intel %s chipset, "
+ "but could not find the secondary device.\n", name);
+ agp_put_bridge(bridge);
+ return -ENODEV;
+}
+
+static void __devexit agp_intel_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+
+ if (intel_i810_private.i810_dev)
+ pci_dev_put(intel_i810_private.i810_dev);
+ if (intel_i830_private.i830_dev)
+ pci_dev_put(intel_i830_private.i830_dev);
+
+ agp_put_bridge(bridge);
+}
+
+static int agp_intel_resume(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ pci_restore_state(pdev);
+
+ if (bridge->driver == &intel_generic_driver)
+ intel_configure();
+ else if (bridge->driver == &intel_850_driver)
+ intel_850_configure();
+ else if (bridge->driver == &intel_845_driver)
+ intel_845_configure();
+ else if (bridge->driver == &intel_830mp_driver)
+ intel_830mp_configure();
+ else if (bridge->driver == &intel_915_driver)
+ intel_i915_configure();
+ else if (bridge->driver == &intel_830_driver)
+ intel_i830_configure();
+ else if (bridge->driver == &intel_810_driver)
+ intel_i810_configure();
+
+ return 0;
+}
+
+static struct pci_device_id agp_intel_pci_table[] = {
+#define ID(x) \
+ { \
+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
+ .class_mask = ~0, \
+ .vendor = PCI_VENDOR_ID_INTEL, \
+ .device = x, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ }
+ ID(PCI_DEVICE_ID_INTEL_82443LX_0),
+ ID(PCI_DEVICE_ID_INTEL_82443BX_0),
+ ID(PCI_DEVICE_ID_INTEL_82443GX_0),
+ ID(PCI_DEVICE_ID_INTEL_82810_MC1),
+ ID(PCI_DEVICE_ID_INTEL_82810_MC3),
+ ID(PCI_DEVICE_ID_INTEL_82810E_MC),
+ ID(PCI_DEVICE_ID_INTEL_82815_MC),
+ ID(PCI_DEVICE_ID_INTEL_82820_HB),
+ ID(PCI_DEVICE_ID_INTEL_82820_UP_HB),
+ ID(PCI_DEVICE_ID_INTEL_82830_HB),
+ ID(PCI_DEVICE_ID_INTEL_82840_HB),
+ ID(PCI_DEVICE_ID_INTEL_82845_HB),
+ ID(PCI_DEVICE_ID_INTEL_82845G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82850_HB),
+ ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82860_HB),
+ ID(PCI_DEVICE_ID_INTEL_82865_HB),
+ ID(PCI_DEVICE_ID_INTEL_82875_HB),
+ ID(PCI_DEVICE_ID_INTEL_7505_0),
+ ID(PCI_DEVICE_ID_INTEL_7205_0),
+ ID(PCI_DEVICE_ID_INTEL_82915G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
+
+static struct pci_driver agp_intel_pci_driver = {
+ .name = "agpgart-intel",
+ .id_table = agp_intel_pci_table,
+ .probe = agp_intel_probe,
+ .remove = __devexit_p(agp_intel_remove),
+ .resume = agp_intel_resume,
+};
+
+static int __init agp_intel_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_intel_pci_driver);
+}
+
+static void __exit agp_intel_cleanup(void)
+{
+ pci_unregister_driver(&agp_intel_pci_driver);
+}
+
+module_init(agp_intel_init);
+module_exit(agp_intel_cleanup);
+
+MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
new file mode 100644
index 00000000000..c9ac731504f
--- /dev/null
+++ b/drivers/char/agp/isoch.c
@@ -0,0 +1,470 @@
+/*
+ * Setup routines for AGP 3.5 compliant bridges.
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+
+#include "agp.h"
+
+/* Generic AGP 3.5 enabling routines */
+
+struct agp_3_5_dev {
+ struct list_head list;
+ u8 capndx;
+ u32 maxbw;
+ struct pci_dev *dev;
+};
+
+static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
+{
+ struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
+ struct list_head *pos;
+
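+ /* Walk the list and stop at the first device whose maxbw exceeds
+ * that of the new entry; inserting just before it keeps the list
+ * sorted in ascending maxbw order. */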
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ if(cur->maxbw > n->maxbw)
+ break;
+ }
+ list_add_tail(new, pos);
+}
+
+static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
+{
+ struct agp_3_5_dev *cur;
+ struct pci_dev *dev;
+ struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
+ u32 nistat;
+
+ INIT_LIST_HEAD(head);
+
+ for (pos=start; pos!=head; ) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
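+ /* The device's ISOCH_MAXBW requirement lives in bits 23:16 of its
+ * NISTAT register; cache it for the insertion sort below. */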
+ pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
+ cur->maxbw = (nistat >> 16) & 0xff;
+
+ tmp = pos;
+ pos = pos->next;
+ agp_3_5_dev_list_insert(head, tmp);
+ }
+}
+
+/*
+ * Initialize all isochronous transfer parameters for an AGP 3.0
+ * node (i.e. a host bridge in combination with the adapters
+ * lying behind it...)
+ */
+
+static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
+ struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+ /*
+ * Convenience structure to make the calculations clearer
+ * here. The field names come straight from the AGP 3.0 spec.
+ */
+ struct isoch_data {
+ u32 maxbw;
+ u32 n;
+ u32 y;
+ u32 l;
+ u32 rq;
+ struct agp_3_5_dev *dev;
+ };
+
+ struct pci_dev *td = bridge->dev, *dev;
+ struct list_head *head = &dev_list->list, *pos;
+ struct agp_3_5_dev *cur;
+ struct isoch_data *master, target;
+ unsigned int cdev = 0;
+ u32 mnistat, tnistat, tstatus, mcmd;
+ u16 tnicmd, mnicmd;
+ u8 mcapndx;
+ u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
+ u32 step, rem, rem_isoch, rem_async;
+ int ret = 0;
+
+ /*
+ * We'll work with an array of isoch_data's (one for each
+ * device in dev_list) throughout this function.
+ */
+ if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto get_out;
+ }
+
+ /*
+ * Sort the device list by maxbw. We need to do this because the
+ * spec suggests that the devices with the smallest requirements
+ * have their resources allocated first, with all remaining resources
+ * falling to the device with the largest requirement.
+ *
+ * We don't do exactly this: we divide the target's resources by ndevs
+ * and split them amongst the AGP 3.0 devices. The remainder of each
+ * division is handed to the last device, roughly as the spec says
+ * it should be done.
+ *
+ * We can't do this sort when we initially construct the dev_list
+ * because we don't know until this function whether isochronous
+ * transfers are enabled and consequently whether maxbw will mean
+ * anything.
+ */
+ agp_3_5_dev_list_sort(dev_list, ndevs);
+
+ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+
+ /* Extract power-on defaults from the target */
+ target.maxbw = (tnistat >> 16) & 0xff;
+ target.n = (tnistat >> 8) & 0xff;
+ target.y = (tnistat >> 6) & 0x3;
+ target.l = (tnistat >> 3) & 0x7;
+ target.rq = (tstatus >> 24) & 0xff;
+
+ y_max = target.y;
+
+ /*
+ * Extract power-on defaults for each device in dev_list. Along
+ * the way, calculate the total isochronous bandwidth required
+ * by these devices and the largest requested payload size.
+ */
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ mcapndx = cur->capndx;
+
+ pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
+
+ master[cdev].maxbw = (mnistat >> 16) & 0xff;
+ master[cdev].n = (mnistat >> 8) & 0xff;
+ master[cdev].y = (mnistat >> 6) & 0x3;
+ master[cdev].dev = cur;
+
+ tot_bw += master[cdev].maxbw;
+ y_max = max(y_max, master[cdev].y);
+
+ cdev++;
+ }
+
+ /* Check if this configuration has any chance of working */
+ if (tot_bw > target.maxbw) {
+ printk(KERN_ERR PFX "isochronous bandwidth required "
+ "by AGP 3.0 devices exceeds that which is supported by "
+ "the AGP 3.0 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ target.y = y_max;
+
+ /*
+ * Write the calculated payload size into the target's NICMD
+ * register. Doing this directly affects the ISOCH_N value
+ * in the target's NISTAT register, so we need to do this now
+ * to get an accurate value for ISOCH_N later.
+ */
+ pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
+ tnicmd &= ~(0x3 << 6);
+ tnicmd |= target.y << 6;
+ pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
+
+ /* Reread the target's ISOCH_N */
+ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+ target.n = (tnistat >> 8) & 0xff;
+
+ /* Calculate the minimum ISOCH_N needed by each master */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ master[cdev].y = target.y;
+ master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
+
+ tot_n += master[cdev].n;
+ }
+
+ /* Exit if the minimal ISOCH_N allocation among the masters is more
+ * than the target can handle. */
+ if (tot_n > target.n) {
+ printk(KERN_ERR PFX "number of isochronous "
+ "transactions per period required by AGP 3.0 devices "
+ "exceeds that which is supported by the AGP 3.0 "
+ "bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ /* Calculate left over ISOCH_N capability in the target. We'll give
+ * this to the hungriest device (as per the spec) */
+ rem = target.n - tot_n;
+
+ /*
+ * Calculate the minimum isochronous RQ depth needed by each master.
+ * Along the way, distribute the extra ISOCH_N capability calculated
+ * above.
+ */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ /*
+ * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
+ * byte isochronous writes will be broken into 64B pieces.
+ * This means we need to budget more RQ depth to account for
+ * this kind of write (each isochronous write is actually
+ * many writes on the AGP bus).
+ */
+ master[cdev].rq = master[cdev].n;
+ if(master[cdev].y > 0x1)
+ master[cdev].rq *= (1 << (master[cdev].y - 1));
+
+ tot_rq += master[cdev].rq;
+
+ if (cdev == ndevs-1)
+ master[cdev].n += rem;
+ }
+
+ /* Figure the number of isochronous and asynchronous RQ slots the
+ * target is providing. */
+ rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
+ rq_async = target.rq - rq_isoch;
+
+ /* Exit if the minimal RQ needs of the masters exceeds what the target
+ * can provide. */
+ if (tot_rq > rq_isoch) {
+ printk(KERN_ERR PFX "number of request queue slots "
+ "required by the isochronous bandwidth requested by "
+ "AGP 3.0 devices exceeds the number provided by the "
+ "AGP 3.0 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ /* Calculate asynchronous RQ capability in the target (per master) as
+ * well as the total number of leftover isochronous RQ slots. */
+ step = rq_async / ndevs;
+ rem_async = step + (rq_async % ndevs);
+ rem_isoch = rq_isoch - tot_rq;
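+
+ /*
+ * Illustrative example (made-up numbers): with target.rq = 32,
+ * rq_isoch = 16 and ndevs = 2, rq_async = 16, so step = 8 and
+ * rem_async = 8. If the masters' minimum RQ needs sum to
+ * tot_rq = 12, then rem_isoch = 4; the first master gets 8 extra
+ * slots and the last gets 8 + 4 = 12, so all 32 target slots are
+ * accounted for.
+ */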
+
+ /* Distribute the extra RQ slots calculated above and write our
+ * isochronous settings out to the actual devices. */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ cur = master[cdev].dev;
+ dev = cur->dev;
+
+ mcapndx = cur->capndx;
+
+ master[cdev].rq += (cdev == ndevs - 1)
+ ? (rem_async + rem_isoch) : step;
+
+ pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
+ pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
+
+ mnicmd &= ~(0xff << 8);
+ mnicmd &= ~(0x3 << 6);
+ mcmd &= ~(0xff << 24);
+
+ mnicmd |= master[cdev].n << 8;
+ mnicmd |= master[cdev].y << 6;
+ mcmd |= master[cdev].rq << 24;
+
+ pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
+ pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
+ }
+
+free_and_exit:
+ kfree(master);
+
+get_out:
+ return ret;
+}
+
+/*
+ * This function allocates request queue slots among the AGP 3.0
+ * devices behind a non-isochronous node. The algorithm is simple:
+ * divide the total number of RQ slots provided by the target by
+ * ndevs, give that many slots to each AGP 3.0 device, and hand any
+ * leftover slots to the last device in dev_list.
+ */
+static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
+ struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+ struct agp_3_5_dev *cur;
+ struct list_head *head = &dev_list->list, *pos;
+ u32 tstatus, mcmd;
+ u32 trq, mrq, rem;
+ unsigned int cdev = 0;
+
+ pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
+
+ trq = (tstatus >> 24) & 0xff;
+ mrq = trq / ndevs;
+
+ rem = mrq + (trq % ndevs);
+
+ for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+
+ pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
+ mcmd &= ~(0xff << 24);
+ mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
+ pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
+ }
+}
+
+/*
+ * Fully configure and enable an AGP 3.0 host bridge and all the devices
+ * lying behind it.
+ */
+int agp_3_5_enable(struct agp_bridge_data *bridge)
+{
+ struct pci_dev *td = bridge->dev, *dev = NULL;
+ u8 mcapndx;
+ u32 isoch, arqsz;
+ u32 tstatus, mstatus, ncapid;
+ u32 mmajor;
+ u16 mpstat;
+ struct agp_3_5_dev *dev_list, *cur;
+ struct list_head *head, *pos;
+ unsigned int ndevs = 0;
+ int ret = 0;
+
+ /* Extract some power-on defaults from the target */
+ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+ isoch = (tstatus >> 17) & 0x1;
+ if (isoch == 0) /* isoch xfers not available, bail out. */
+ return -ENODEV;
+
+ arqsz = (tstatus >> 13) & 0x7;
+
+ /*
+ * Allocate a head for our AGP 3.5 device list
+ * (multiple AGP v3 devices are allowed behind a single bridge).
+ */
+ if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto get_out;
+ }
+ head = &dev_list->list;
+ INIT_LIST_HEAD(head);
+
+ /* Find all AGP devices, and add them to dev_list. */
+ for_each_pci_dev(dev) {
+ mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
+ if (mcapndx == 0)
+ continue;
+
+ switch ((dev->class >>8) & 0xff00) {
+ case 0x0600: /* Bridge */
+ /* Skip bridges. We should call this function for each one. */
+ continue;
+
+ case 0x0001: /* Unclassified device */
+ /* Don't know what this is, but log it for investigation. */
+ if (mcapndx != 0) {
+ printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n",
+ dev->vendor, dev->device);
+ }
+ continue;
+
+ case 0x0300: /* Display controller */
+ case 0x0400: /* Multimedia controller */
+ if((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto free_and_exit;
+ }
+ cur->dev = dev;
+
+ pos = &cur->list;
+ list_add(pos, head);
+ ndevs++;
+ continue;
+
+ default:
+ continue;
+ }
+ }
+
+ /*
+ * Take an initial pass through the devices lying behind our host
+ * bridge. Make sure each one is actually an AGP 3.0 device, otherwise
+ * exit with an error message. Along the way store the AGP 3.0
+ * cap_ptr for each device
+ */
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ pci_read_config_word(dev, PCI_STATUS, &mpstat);
+ if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
+ continue;
+
+ pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
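+ /* Walk the PCI capability list by hand until the AGP
+ * capability (ID 2) is found or the list terminates. */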
+ if (mcapndx != 0) {
+ do {
+ pci_read_config_dword(dev, mcapndx, &ncapid);
+ if ((ncapid & 0xff) != 2)
+ mcapndx = (ncapid >> 8) & 0xff;
+ }
+ while (((ncapid & 0xff) != 2) && (mcapndx != 0));
+ }
+
+ if (mcapndx == 0) {
+ printk(KERN_ERR PFX "woah! Non-AGP device "
+ "found on the secondary bus of an AGP 3.5 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+ if (mmajor < 3) {
+ printk(KERN_ERR PFX "woah! AGP 2.0 device "
+ "found on the secondary bus of an AGP 3.5 "
+ "bridge operating with AGP 3.0 electricals!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ cur->capndx = mcapndx;
+
+ pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
+
+ if (((mstatus >> 3) & 0x1) == 0) {
+ printk(KERN_ERR PFX "woah! AGP 3.x device "
+ "not operating in AGP 3.x mode found on the "
+ "secondary bus of an AGP 3.5 bridge operating "
+ "with AGP 3.0 electricals!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+ }
+
+ /*
+ * Call functions to divide target resources amongst the AGP 3.0
+ * masters. This process is dramatically different depending on
+ * whether isochronous transfers are supported.
+ */
+ if (isoch) {
+ ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
+ if (ret) {
+ printk(KERN_INFO PFX "Something bad happened setting "
+ "up isochronous xfers. Falling back to "
+ "non-isochronous xfer mode.\n");
+ } else {
+ goto free_and_exit;
+ }
+ }
+ agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);
+
+free_and_exit:
+ /* Be sure to free the dev_list */
+ for (pos=head->next; pos!=head; ) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+
+ pos = pos->next;
+ kfree(cur);
+ }
+ kfree(dev_list);
+
+get_out:
+ return ret;
+}
+
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
new file mode 100644
index 00000000000..4f7a3e8bc91
--- /dev/null
+++ b/drivers/char/agp/nvidia-agp.c
@@ -0,0 +1,424 @@
+/*
+ * Nvidia AGPGART routines.
+ * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
+ * to work in 2.5 by Dave Jones <davej@codemonkey.org.uk>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/gfp.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include "agp.h"
+
+/* NVIDIA registers */
+#define NVIDIA_0_APSIZE 0x80
+#define NVIDIA_1_WBC 0xf0
+#define NVIDIA_2_GARTCTRL 0xd0
+#define NVIDIA_2_APBASE 0xd8
+#define NVIDIA_2_APLIMIT 0xdc
+#define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4)
+#define NVIDIA_3_APBASE 0x50
+#define NVIDIA_3_APLIMIT 0x54
+
+
+static struct _nvidia_private {
+ struct pci_dev *dev_1;
+ struct pci_dev *dev_2;
+ struct pci_dev *dev_3;
+ volatile u32 __iomem *aperture;
+ int num_active_entries;
+ off_t pg_offset;
+ u32 wbc_mask;
+} nvidia_private;
+
+
+static int nvidia_fetch_size(void)
+{
+ int i;
+ u8 size_value;
+ struct aper_size_info_8 *values;
+
+ pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value);
+ size_value &= 0x0f;
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (size_value == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+#define SYSCFG 0xC0010010
+#define IORR_BASE0 0xC0010016
+#define IORR_MASK0 0xC0010017
+#define AMD_K7_NUM_IORR 2
+
+static int nvidia_init_iorr(u32 base, u32 size)
+{
+ u32 base_hi, base_lo;
+ u32 mask_hi, mask_lo;
+ u32 sys_hi, sys_lo;
+ u32 iorr_addr, free_iorr_addr;
+
+ /* Find the iorr that is already used for the base */
+ /* If not found, determine the uppermost available iorr */
+ free_iorr_addr = AMD_K7_NUM_IORR;
+ for(iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
+ rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+ rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+ if ((base_lo & 0xfffff000) == (base & 0xfffff000))
+ break;
+
+ if ((mask_lo & 0x00000800) == 0)
+ free_iorr_addr = iorr_addr;
+ }
+
+ if (iorr_addr >= AMD_K7_NUM_IORR) {
+ iorr_addr = free_iorr_addr;
+ if (iorr_addr >= AMD_K7_NUM_IORR)
+ return -EINVAL;
+ }
+ base_hi = 0x0;
+ base_lo = (base & ~0xfff) | 0x18;
+ mask_hi = 0xf;
+ mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800;
+ wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+ wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+ rdmsr(SYSCFG, sys_lo, sys_hi);
+ sys_lo |= 0x00100000;
+ wrmsr(SYSCFG, sys_lo, sys_hi);
+
+ return 0;
+}
+
+static int nvidia_configure(void)
+{
+ int i, rc, num_dirs;
+ u32 apbase, aplimit;
+ struct aper_size_info_8 *current_size;
+ u32 temp;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &apbase);
+ apbase &= PCI_BASE_ADDRESS_MEM_MASK;
+ agp_bridge->gart_bus_addr = apbase;
+ aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
+ pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
+ pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
+ if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024)))
+ return rc;
+
+ /* directory size is 64k */
+ num_dirs = current_size->size / 64;
+ nvidia_private.num_active_entries = current_size->num_entries;
+ nvidia_private.pg_offset = 0;
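+ /* Apertures smaller than 64MB still use a full 64k directory;
+ * only the slice of entries matching the aperture's alignment
+ * within the surrounding 64MB block is active, so record that
+ * offset (in pages) for the insert/remove routines. */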
+ if (num_dirs == 0) {
+ num_dirs = 1;
+ nvidia_private.num_active_entries /= (64 / current_size->size);
+ nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) &
+ ~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE;
+ }
+
+ /* attbase */
+ for(i = 0; i < 8; i++) {
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i),
+ (agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1);
+ }
+
+ /* gtlb control */
+ pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11);
+
+ /* gart control */
+ pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
+ pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100);
+
+ /* map aperture */
+ nvidia_private.aperture =
+ (volatile u32 __iomem *) ioremap(apbase, 33 * PAGE_SIZE);
+
+ return 0;
+}
+
+static void nvidia_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+ u32 temp;
+
+ /* gart control */
+ pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
+ pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100));
+
+ /* gtlb control */
+ pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11));
+
+ /* unmap aperture */
+ iounmap((void __iomem *) nvidia_private.aperture);
+
+ /* restore previous aperture size */
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+ previous_size->size_value);
+
+ /* restore iorr for previous aperture size */
+ nvidia_init_iorr(agp_bridge->gart_bus_addr,
+ previous_size->size * 1024 * 1024);
+}
+
+
+/*
+ * Note we can't use the generic routines, even though they are 99% the same.
+ * Aperture sizes <64M still require a full 64k GART directory, but
+ * only the portion of the TLB entries that corresponds to the aperture's
+ * alignment inside the surrounding 64M block is used.
+ */
+extern int agp_memory_reserved;
+
+static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j;
+
+ if ((type != 0) || (mem->type != 0))
+ return -EINVAL;
+
+ if ((pg_start + mem->page_count) >
+ (nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
+ return -EINVAL;
+
+ for(j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
+ return -EBUSY;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ global_cache_flush();
+ mem->is_flushed = TRUE;
+ }
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ mem->memory[i], mem->type),
+ agp_bridge->gatt_table+nvidia_private.pg_offset+j);
+ readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j); /* PCI Posting. */
+ }
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i;
+
+ if ((type != 0) || (mem->type != 0))
+ return -EINVAL;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++)
+ writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i);
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+static void nvidia_tlbflush(struct agp_memory *mem)
+{
+ unsigned long end;
+ u32 wbc_reg, temp;
+ int i;
+
+ /* flush chipset */
+ if (nvidia_private.wbc_mask) {
+ pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg);
+ wbc_reg |= nvidia_private.wbc_mask;
+ pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg);
+
+ end = jiffies + 3*HZ;
+ do {
+ pci_read_config_dword(nvidia_private.dev_1,
+ NVIDIA_1_WBC, &wbc_reg);
+ if ((signed)(end - jiffies) <= 0) {
+ printk(KERN_ERR PFX
+ "TLB flush took more than 3 seconds.\n");
+ }
+ } while (wbc_reg & nvidia_private.wbc_mask);
+ }
+
+ /* flush TLB entries */
+ for(i = 0; i < 32 + 1; i++)
+ temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
+ for(i = 0; i < 32 + 1; i++)
+ temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
+}
+
+
+static struct aper_size_info_8 nvidia_generic_sizes[5] =
+{
+ {512, 131072, 7, 0},
+ {256, 65536, 6, 8},
+ {128, 32768, 5, 12},
+ {64, 16384, 4, 14},
+ /* The 32M mode still requires a 64k gatt */
+ {32, 16384, 4, 15}
+};
+
+
+static struct gatt_mask nvidia_generic_masks[] =
+{
+ { .mask = 1, .type = 0}
+};
+
+
+struct agp_bridge_driver nvidia_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = nvidia_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 5,
+ .configure = nvidia_configure,
+ .fetch_size = nvidia_fetch_size,
+ .cleanup = nvidia_cleanup,
+ .tlb_flush = nvidia_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = nvidia_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = nvidia_insert_memory,
+ .remove_memory = nvidia_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ nvidia_private.dev_1 =
+ pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1));
+ nvidia_private.dev_2 =
+ pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2));
+ nvidia_private.dev_3 =
+ pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0));
+
+ if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
+ printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
+ "chipset, but could not find the secondary devices.\n");
+ return -ENODEV;
+ }
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_NVIDIA_NFORCE:
+ printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n");
+ nvidia_private.wbc_mask = 0x00010000;
+ break;
+ case PCI_DEVICE_ID_NVIDIA_NFORCE2:
+ printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n");
+ nvidia_private.wbc_mask = 0x80000000;
+ break;
+ default:
+ printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n",
+ pdev->device);
+ return -ENODEV;
+ }
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &nvidia_driver;
+ bridge->dev_private_data = &nvidia_private;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_nvidia_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_nvidia_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
+
+static struct pci_driver agp_nvidia_pci_driver = {
+ .name = "agpgart-nvidia",
+ .id_table = agp_nvidia_pci_table,
+ .probe = agp_nvidia_probe,
+ .remove = agp_nvidia_remove,
+};
+
+static int __init agp_nvidia_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_nvidia_pci_driver);
+}
+
+static void __exit agp_nvidia_cleanup(void)
+{
+ pci_unregister_driver(&agp_nvidia_pci_driver);
+}
+
+module_init(agp_nvidia_init);
+module_exit(agp_nvidia_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("NVIDIA Corporation");
+
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
new file mode 100644
index 00000000000..4b3eda26797
--- /dev/null
+++ b/drivers/char/agp/sgi-agp.c
@@ -0,0 +1,331 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * SGI TIOCA AGPGART routines.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioca_provider.h>
+#include "agp.h"
+
+extern int agp_memory_reserved;
+extern uint32_t tioca_gart_found;
+extern struct list_head tioca_list;
+static struct agp_bridge_data **sgi_tioca_agp_bridges;
+
+/*
+ * The aperture size and related information are set up at TIOCA init time.
+ * Values for this table will be extracted and filled in at
+ * sgi_tioca_fetch_size() time.
+ */
+
+static struct aper_size_info_fixed sgi_tioca_sizes[] = {
+ {0, 0, 0},
+};
+
+static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
+{
+ struct page *page;
+ int nid;
+ struct tioca_kernel *info =
+ (struct tioca_kernel *)bridge->dev_private_data;
+
+ nid = info->ca_closest_node;
+ page = alloc_pages_node(nid, GFP_KERNEL, 0);
+ if (page == NULL) {
+ return 0;
+ }
+
+ get_page(page);
+ SetPageLocked(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page_address(page);
+}
+
+/*
+ * Flush the GART TLBs. We cannot selectively flush based on memory, so the
+ * mem arg is ignored.
+ */
+
+static void sgi_tioca_tlbflush(struct agp_memory *mem)
+{
+ tioca_tlbflush(mem->bridge->dev_private_data);
+}
+
+/*
+ * Given an address of a host physical page, turn it into a valid gart
+ * entry.
+ */
+static unsigned long
+sgi_tioca_mask_memory(struct agp_bridge_data *bridge,
+ unsigned long addr, int type)
+{
+ return tioca_physpage_to_gart(addr);
+}
+
+static void sgi_tioca_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ tioca_fastwrite_enable(bridge->dev_private_data);
+}
+
+/*
+ * sgi_tioca_configure() doesn't have anything to do since the base CA driver
+ * has already set up the GART.
+ */
+
+static int sgi_tioca_configure(void)
+{
+ return 0;
+}
+
+/*
+ * Determine the gfx aperture size. This has already been determined by the
+ * CA driver init, so we just need to set the agp_bridge values accordingly.
+ */
+
+static int sgi_tioca_fetch_size(void)
+{
+ struct tioca_kernel *info =
+ (struct tioca_kernel *)agp_bridge->dev_private_data;
+
+ sgi_tioca_sizes[0].size = info->ca_gfxap_size / MB(1);
+ sgi_tioca_sizes[0].num_entries = info->ca_gfxgart_entries;
+
+ return sgi_tioca_sizes[0].size;
+}
+
+static int sgi_tioca_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct tioca_kernel *info =
+ (struct tioca_kernel *)bridge->dev_private_data;
+
+ bridge->gatt_table_real = (u32 *) info->ca_gfxgart;
+ bridge->gatt_table = bridge->gatt_table_real;
+ bridge->gatt_bus_addr = info->ca_gfxgart_base;
+
+ return 0;
+}
+
+static int sgi_tioca_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ return 0;
+}
+
+static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int num_entries;
+ size_t i;
+ off_t j;
+ void *temp;
+ struct agp_bridge_data *bridge;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ return -EINVAL;
+ break;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved / PAGE_SIZE;
+ if (num_entries < 0)
+ num_entries = 0;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+
+ while (j < (pg_start + mem->page_count)) {
+ if (*(bridge->gatt_table + j))
+ return -EBUSY;
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ bridge->driver->cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ *(bridge->gatt_table + j) =
+ bridge->driver->mask_memory(bridge, mem->memory[i],
+ mem->type);
+ }
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ size_t i;
+ struct agp_bridge_data *bridge;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ *(bridge->gatt_table + i) = 0;
+ }
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static void sgi_tioca_cache_flush(void)
+{
+}
+
+/*
+ * Cleanup. Nothing to do as the CA driver owns the GART.
+ */
+
+static void sgi_tioca_cleanup(void)
+{
+}
+
+static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+
+ list_for_each_entry(bridge, &agp_bridges, list) {
+ if (bridge->dev->bus == pdev->bus)
+ break;
+ }
+ return bridge;
+}
+
+struct agp_bridge_driver sgi_tioca_driver = {
+ .owner = THIS_MODULE,
+ .size_type = U16_APER_SIZE,
+ .configure = sgi_tioca_configure,
+ .fetch_size = sgi_tioca_fetch_size,
+ .cleanup = sgi_tioca_cleanup,
+ .tlb_flush = sgi_tioca_tlbflush,
+ .mask_memory = sgi_tioca_mask_memory,
+ .agp_enable = sgi_tioca_agp_enable,
+ .cache_flush = sgi_tioca_cache_flush,
+ .create_gatt_table = sgi_tioca_create_gatt_table,
+ .free_gatt_table = sgi_tioca_free_gatt_table,
+ .insert_memory = sgi_tioca_insert_memory,
+ .remove_memory = sgi_tioca_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = sgi_tioca_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .cant_use_aperture = 1,
+ .needs_scratch_page = 0,
+ .num_aperture_sizes = 1,
+};
+
+static int __devinit agp_sgi_init(void)
+{
+ unsigned int j;
+ struct tioca_kernel *info;
+ struct pci_dev *pdev = NULL;
+
+ if (tioca_gart_found)
+ printk(KERN_INFO PFX "SGI TIO CA GART driver initialized.\n");
+ else
+ return 0;
+
+ sgi_tioca_agp_bridges = kmalloc(tioca_gart_found *
+ sizeof(struct agp_bridge_data *),
+ GFP_KERNEL);
+ if (!sgi_tioca_agp_bridges)
+ return -ENOMEM;
+
+ j = 0;
+ list_for_each_entry(info, &tioca_list, ca_list) {
+ struct list_head *tmp;
+ list_for_each(tmp, info->ca_devices) {
+ u8 cap_ptr;
+ pdev = pci_dev_b(tmp);
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
+ continue;
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ continue;
+ }
+ sgi_tioca_agp_bridges[j] = agp_alloc_bridge();
+ printk(KERN_INFO PFX "bridge %d = 0x%p\n", j,
+ sgi_tioca_agp_bridges[j]);
+ if (sgi_tioca_agp_bridges[j]) {
+ sgi_tioca_agp_bridges[j]->dev = pdev;
+ sgi_tioca_agp_bridges[j]->dev_private_data = info;
+ sgi_tioca_agp_bridges[j]->driver = &sgi_tioca_driver;
+ sgi_tioca_agp_bridges[j]->gart_bus_addr =
+ info->ca_gfxap_base;
+ sgi_tioca_agp_bridges[j]->mode = (0x7D << 24) | /* 126 requests */
+ (0x1 << 9) | /* SBA supported */
+ (0x1 << 5) | /* 64-bit addresses supported */
+ (0x1 << 4) | /* FW supported */
+ (0x1 << 3) | /* AGP 3.0 mode */
+ 0x2; /* 8x transfer only */
+ sgi_tioca_agp_bridges[j]->current_size =
+ sgi_tioca_agp_bridges[j]->previous_size =
+ (void *)&sgi_tioca_sizes[0];
+ agp_add_bridge(sgi_tioca_agp_bridges[j]);
+ }
+ j++;
+ }
+
+ agp_find_bridge = &sgi_tioca_find_bridge;
+ return 0;
+}
+
+static void __devexit agp_sgi_cleanup(void)
+{
+ kfree(sgi_tioca_agp_bridges);
+ sgi_tioca_agp_bridges = NULL;
+}
+
+module_init(agp_sgi_init);
+module_exit(agp_sgi_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
new file mode 100644
index 00000000000..cfccacb2a64
--- /dev/null
+++ b/drivers/char/agp/sis-agp.c
@@ -0,0 +1,360 @@
+/*
+ * SiS AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include "agp.h"
+
+#define SIS_ATTBASE 0x90
+#define SIS_APSIZE 0x94
+#define SIS_TLBCNTRL 0x97
+#define SIS_TLBFLUSH 0x98
+
+static int __devinitdata agp_sis_force_delay = 0;
+static int __devinitdata agp_sis_agp_spec = -1;
+
+static int sis_fetch_size(void)
+{
+ u8 temp_size;
+ int i;
+ struct aper_size_info_8 *values;
+
+ pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size);
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if ((temp_size == values[i].size_value) ||
+ ((temp_size & ~(0x03)) ==
+ (values[i].size_value & ~(0x03)))) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void sis_tlbflush(struct agp_memory *mem)
+{
+ pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02);
+}
+
+static int sis_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
+ agp_bridge->gatt_bus_addr);
+ pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+ current_size->size_value);
+ return 0;
+}
+
+static void sis_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+ (previous_size->size_value & ~(0x03)));
+}
+
+static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ struct pci_dev *device = NULL;
+ u32 command;
+ int rate;
+
+ printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
+ agp_bridge->major_version,
+ agp_bridge->minor_version,
+ pci_name(agp_bridge->dev));
+
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
+ command = agp_collect_device_status(bridge, mode, command);
+ command |= AGPSTAT_AGP_ENABLE;
+ rate = (command & 0x7) << 2;
+
+ for_each_pci_dev(device) {
+ u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+
+ printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n",
+ pci_name(device), rate);
+
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
+
+ /*
+ * Weird: on some sis chipsets any rate change in the target
+ * command register triggers a 5ms screwup during which the master
+ * cannot be configured
+ */
+ if (device->device == bridge->dev->device) {
+ printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n");
+ msleep(10);
+ }
+ }
+}
+
+static struct aper_size_info_8 sis_generic_sizes[7] =
+{
+ {256, 65536, 6, 99},
+ {128, 32768, 5, 83},
+ {64, 16384, 4, 67},
+ {32, 8192, 3, 51},
+ {16, 4096, 2, 35},
+ {8, 2048, 1, 19},
+ {4, 1024, 0, 3}
+};
+
+struct agp_bridge_driver sis_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = sis_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = sis_configure,
+ .fetch_size = sis_fetch_size,
+ .cleanup = sis_cleanup,
+ .tlb_flush = sis_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_device_ids sis_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_SI_5591_AGP,
+ .chipset_name = "5591",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_530,
+ .chipset_name = "530",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_540,
+ .chipset_name = "540",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_550,
+ .chipset_name = "550",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_620,
+ .chipset_name = "620",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_630,
+ .chipset_name = "630",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_635,
+ .chipset_name = "635",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_645,
+ .chipset_name = "645",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_646,
+ .chipset_name = "646",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_648,
+ .chipset_name = "648",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_650,
+ .chipset_name = "650",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_651,
+ .chipset_name = "651",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_655,
+ .chipset_name = "655",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_661,
+ .chipset_name = "661",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_730,
+ .chipset_name = "730",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_735,
+ .chipset_name = "735",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_740,
+ .chipset_name = "740",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_741,
+ .chipset_name = "741",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_745,
+ .chipset_name = "745",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_746,
+ .chipset_name = "746",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_SI_760,
+ .chipset_name = "760",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+
+/* Chipsets that require the 'delay hack' */
+static int sis_broken_chipsets[] __devinitdata = {
+ PCI_DEVICE_ID_SI_648,
+ PCI_DEVICE_ID_SI_746,
+ 0 // terminator
+};
+
+static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
+{
+ int i;
+
+ for(i=0; sis_broken_chipsets[i]!=0; ++i)
+ if(bridge->dev->device==sis_broken_chipsets[i])
+ break;
+
+ if(sis_broken_chipsets[i] || agp_sis_force_delay)
+ sis_driver.agp_enable=sis_delayed_enable;
+
+ /* SiS chipsets that indicate less than AGP 3.5 are not actually
+ * fully AGP 3 compliant. */
+ if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5
+ && agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) {
+ sis_driver.aperture_sizes = agp3_generic_sizes;
+ sis_driver.size_type = U16_APER_SIZE;
+ sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES;
+ sis_driver.configure = agp3_generic_configure;
+ sis_driver.fetch_size = agp3_generic_fetch_size;
+ sis_driver.cleanup = agp3_generic_cleanup;
+ sis_driver.tlb_flush = agp3_generic_tlbflush;
+ }
+}
+
+
+static int __devinit agp_sis_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = sis_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name; j++) {
+ if (pdev->device == devs[j].device_id) {
+ printk(KERN_INFO PFX "Detected SiS %s chipset\n",
+ devs[j].chipset_name);
+ goto found;
+ }
+ }
+
+ printk(KERN_ERR PFX "Unsupported SiS chipset (device id: %04x)\n",
+ pdev->device);
+ return -ENODEV;
+
+found:
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &sis_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ get_agp_version(bridge);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+ sis_get_driver(bridge);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_sis_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_sis_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
+
+static struct pci_driver agp_sis_pci_driver = {
+ .name = "agpgart-sis",
+ .id_table = agp_sis_pci_table,
+ .probe = agp_sis_probe,
+ .remove = agp_sis_remove,
+};
+
+static int __init agp_sis_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_sis_pci_driver);
+}
+
+static void __exit agp_sis_cleanup(void)
+{
+ pci_unregister_driver(&agp_sis_pci_driver);
+}
+
+module_init(agp_sis_init);
+module_exit(agp_sis_cleanup);
+
+module_param(agp_sis_force_delay, bool, 0);
+MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack");
+module_param(agp_sis_agp_spec, int, 0);
+MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
new file mode 100644
index 00000000000..bb338d9134e
--- /dev/null
+++ b/drivers/char/agp/sworks-agp.c
@@ -0,0 +1,556 @@
+/*
+ * Serverworks AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+#define SVWRKS_COMMAND 0x04
+#define SVWRKS_APSIZE 0x10
+#define SVWRKS_MMBASE 0x14
+#define SVWRKS_CACHING 0x4b
+#define SVWRKS_AGP_ENABLE 0x60
+#define SVWRKS_FEATURE 0x68
+
+#define SVWRKS_SIZE_MASK 0xfe000000
+
+/* Memory mapped registers */
+#define SVWRKS_GART_CACHE 0x02
+#define SVWRKS_GATTBASE 0x04
+#define SVWRKS_TLBFLUSH 0x10
+#define SVWRKS_POSTFLUSH 0x14
+#define SVWRKS_DIRFLUSH 0x0c
+
+
+struct serverworks_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+};
+
+static struct _serverworks_private {
+ struct pci_dev *svrwrks_dev; /* device one */
+ volatile u8 __iomem *registers;
+ struct serverworks_page_map **gatt_pages;
+ int num_tables;
+ struct serverworks_page_map scratch_dir;
+
+ int gart_addr_ofs;
+ int mm_addr_ofs;
+} serverworks_private;
+
+static int serverworks_create_page_map(struct serverworks_page_map *page_map)
+{
+ int i;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL) {
+ return -ENOMEM;
+ }
+ SetPageReserved(virt_to_page(page_map->real));
+ global_cache_flush();
+ page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+ PAGE_SIZE);
+ if (page_map->remapped == NULL) {
+ ClearPageReserved(virt_to_page(page_map->real));
+ free_page((unsigned long) page_map->real);
+ page_map->real = NULL;
+ return -ENOMEM;
+ }
+ global_cache_flush();
+
+ for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+
+ return 0;
+}
+
+static void serverworks_free_page_map(struct serverworks_page_map *page_map)
+{
+ iounmap(page_map->remapped);
+ ClearPageReserved(virt_to_page(page_map->real));
+ free_page((unsigned long) page_map->real);
+}
+
+static void serverworks_free_gatt_pages(void)
+{
+ int i;
+ struct serverworks_page_map **tables;
+ struct serverworks_page_map *entry;
+
+ tables = serverworks_private.gatt_pages;
+ for(i = 0; i < serverworks_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL) {
+ serverworks_free_page_map(entry);
+ }
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+}
+
+static int serverworks_create_gatt_pages(int nr_tables)
+{
+ struct serverworks_page_map **tables;
+ struct serverworks_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL) {
+ return -ENOMEM;
+ }
+ memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
+ for (i = 0; i < nr_tables; i++) {
+ entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ memset(entry, 0, sizeof(struct serverworks_page_map));
+ tables[i] = entry;
+ retval = serverworks_create_page_map(entry);
+ if (retval != 0) break;
+ }
+ serverworks_private.num_tables = nr_tables;
+ serverworks_private.gatt_pages = tables;
+
+ if (retval != 0) serverworks_free_gatt_pages();
+
+ return retval;
+}
+
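+/*
+ * The Serverworks GART is a two-level table: bits 31:22 of a GART bus
+ * address select the page-directory entry (relative to the start of
+ * the aperture), and bits 21:12 select the GATT entry within that
+ * 4KB page.
+ */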
+#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+#ifndef GET_PAGE_DIR_OFF
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#endif
+
+#ifndef GET_PAGE_DIR_IDX
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#endif
+
+#ifndef GET_GATT_OFF
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#endif
+
+static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ struct serverworks_page_map page_dir;
+ int retval;
+ u32 temp;
+ int i;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = serverworks_create_page_map(&page_dir);
+ if (retval != 0) {
+ return retval;
+ }
+ retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ return retval;
+ }
+ /* Create a fake scratch directory */
+ for(i = 0; i < 1024; i++) {
+ writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
+ writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+ }
+
+ retval = serverworks_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ serverworks_free_page_map(&serverworks_private.scratch_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+ /* Get the address for the gart region.
+ * This is a bus address even on the alpha, because it's
+ * used to program the agp master, not the cpu.
+ */
+
+ pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* Calculate the agp offset */
+
+ for(i = 0; i < value->num_entries / 1024; i++)
+ writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+
+ return 0;
+}
+
+static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct serverworks_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ serverworks_free_gatt_pages();
+ serverworks_free_page_map(&page_dir);
+ serverworks_free_page_map(&serverworks_private.scratch_dir);
+ return 0;
+}
+
+static int serverworks_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ u32 temp2;
+ struct aper_size_info_lvl2 *values;
+
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+ pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
+ pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
+ SVWRKS_SIZE_MASK);
+ pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
+ pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
+ temp2 &= SVWRKS_SIZE_MASK;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp2 == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT and flushing them individually. However,
+ * currently it just flushes the whole table, which is probably
+ * more efficient, since agp_memory blocks can span a large number
+ * of entries.
+ */
+static void serverworks_tlbflush(struct agp_memory *temp)
+{
+ writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
+ while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1)
+ cpu_relax();
+
+ writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
+ while(readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1)
+ cpu_relax();
+}
+
+static int serverworks_configure(void)
+{
+ struct aper_size_info_lvl2 *current_size;
+ u32 temp;
+ u8 enable_reg;
+ u16 cap_reg;
+
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
+ temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+ if (!serverworks_private.registers) {
+ printk (KERN_ERR PFX "Unable to ioremap() memory.\n");
+ return -ENOMEM;
+ }
+
+ writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
+ readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. */
+
+ writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
+ readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. */
+
+ cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
+ cap_reg &= ~0x0007;
+ cap_reg |= 0x4;
+ writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
+ readw(serverworks_private.registers+SVWRKS_COMMAND);
+
+ pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
+ enable_reg |= 0x1; /* Agp Enable bit */
+ pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
+ serverworks_tlbflush(NULL);
+
+ agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);
+
+ pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
+ enable_reg &= ~0x3;
+ pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);
+
+ pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
+ enable_reg |= (1<<6);
+ pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg);
+
+ return 0;
+}
+
+static void serverworks_cleanup(void)
+{
+ iounmap((void __iomem *) serverworks_private.registers);
+}
+
+static int serverworks_insert_memory(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+ if ((pg_start + mem->page_count) > num_entries) {
+ return -EINVAL;
+ }
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ global_cache_flush();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+ }
+ serverworks_tlbflush(mem);
+ return 0;
+}
+
+static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ global_cache_flush();
+ serverworks_tlbflush(mem);
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
+ serverworks_tlbflush(mem);
+ return 0;
+}
+
+static struct gatt_mask serverworks_masks[] =
+{
+ {.mask = 1, .type = 0}
+};
+
+static struct aper_size_info_lvl2 serverworks_sizes[7] =
+{
+ {2048, 524288, 0x80000000},
+ {1024, 262144, 0xc0000000},
+ {512, 131072, 0xe0000000},
+ {256, 65536, 0xf0000000},
+ {128, 32768, 0xf8000000},
+ {64, 16384, 0xfc000000},
+ {32, 8192, 0xfe000000}
+};
+
+static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ u32 command;
+
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ bridge->capndx + PCI_AGP_STATUS,
+ &command);
+
+ command = agp_collect_device_status(bridge, mode, command);
+
+ command &= ~0x10; /* disable FW */
+ command &= ~0x08;
+
+ command |= 0x100;
+
+ pci_write_config_dword(serverworks_private.svrwrks_dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ command);
+
+ agp_device_command(command, 0);
+}
+
+struct agp_bridge_driver sworks_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = serverworks_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = serverworks_configure,
+ .fetch_size = serverworks_fetch_size,
+ .cleanup = serverworks_cleanup,
+ .tlb_flush = serverworks_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = serverworks_masks,
+ .agp_enable = serverworks_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = serverworks_create_gatt_table,
+ .free_gatt_table = serverworks_free_gatt_table,
+ .insert_memory = serverworks_insert_memory,
+ .remove_memory = serverworks_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ struct pci_dev *bridge_dev;
+ u32 temp, temp2;
+ u8 cap_ptr = 0;
+
+ /* Everything is on func 1 here so we are hardcoding function one */
+ bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
+ PCI_DEVFN(0, 1));
+ if (!bridge_dev) {
+ printk(KERN_INFO PFX "Detected a Serverworks chipset "
+ "but could not find the secondary device.\n");
+ return -ENODEV;
+ }
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+ switch (pdev->device) {
+ case 0x0006:
+ /* ServerWorks CNB20HE: no AGP present, bail out. */
+ printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
+ return -ENODEV;
+
+ case PCI_DEVICE_ID_SERVERWORKS_HE:
+ case PCI_DEVICE_ID_SERVERWORKS_LE:
+ case 0x0007:
+ break;
+
+ default:
+ if (cap_ptr)
+ printk(KERN_ERR PFX "Unsupported Serverworks chipset "
+ "(device id: %04x)\n", pdev->device);
+ return -ENODEV;
+ }
+
+ serverworks_private.svrwrks_dev = bridge_dev;
+ serverworks_private.gart_addr_ofs = 0x10;
+
+ pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
+ if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
+ if (temp2 != 0) {
+ printk(KERN_INFO PFX "Detected 64 bit aperture address, "
+ "but top bits are not zero. Disabling agp\n");
+ return -ENODEV;
+ }
+ serverworks_private.mm_addr_ofs = 0x18;
+ } else
+ serverworks_private.mm_addr_ofs = 0x14;
+
+ pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
+ if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(pdev,
+ serverworks_private.mm_addr_ofs + 4, &temp2);
+ if (temp2 != 0) {
+ printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
+ "but top bits are not zero. Disabling agp\n");
+ return -ENODEV;
+ }
+ }
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &sworks_driver;
+	bridge->dev_private_data = &serverworks_private;
+ bridge->dev = pdev;
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_serverworks_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SERVERWORKS,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
+
+static struct pci_driver agp_serverworks_pci_driver = {
+ .name = "agpgart-serverworks",
+ .id_table = agp_serverworks_pci_table,
+ .probe = agp_serverworks_probe,
+ .remove = agp_serverworks_remove,
+};
+
+static int __init agp_serverworks_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_serverworks_pci_driver);
+}
+
+static void __exit agp_serverworks_cleanup(void)
+{
+ pci_unregister_driver(&agp_serverworks_pci_driver);
+}
+
+module_init(agp_serverworks_init);
+module_exit(agp_serverworks_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
new file mode 100644
index 00000000000..0f248239b4b
--- /dev/null
+++ b/drivers/char/agp/uninorth-agp.c
@@ -0,0 +1,647 @@
+/*
+ * UniNorth AGPGART routines.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include <asm/uninorth.h>
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include "agp.h"
+
+/*
+ * NOTES on uninorth3 (G5 AGP) support:
+ *
+ * It may also be possible to use a bigger cache line size for
+ * AGP (see pmac_pci.c and look for cache line). This needs to be
+ * investigated by someone.
+ *
+ * PAGE sizes are hardcoded but this may change, see asm/page.h.
+ *
+ * Jerome Glisse <j.glisse@gmail.com>
+ */
+static int uninorth_rev;
+static int is_u3;
+
+static int uninorth_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_32 *values;
+
+ pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_BASE, &temp);
+ temp &= ~(0xfffff000);
+ values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+}
+
+static void uninorth_tlbflush(struct agp_memory *mem)
+{
+ u32 ctrl = UNI_N_CFG_GART_ENABLE;
+
+ if (is_u3)
+ ctrl |= U3_N_CFG_GART_PERFRD;
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl | UNI_N_CFG_GART_INVAL);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);
+
+ if (uninorth_rev <= 0x30) {
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl | UNI_N_CFG_GART_2xRESET);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl);
+ }
+}
+
+static void uninorth_cleanup(void)
+{
+ u32 tmp;
+
+ pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp);
+ if (!(tmp & UNI_N_CFG_GART_ENABLE))
+ return;
+ tmp |= UNI_N_CFG_GART_INVAL;
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0);
+
+ if (uninorth_rev <= 0x30) {
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ UNI_N_CFG_GART_2xRESET);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ 0);
+ }
+}
+
+static int uninorth_configure(void)
+{
+ struct aper_size_info_32 *current_size;
+
+ current_size = A_SIZE_32(agp_bridge->current_size);
+
+ printk(KERN_INFO PFX "configuring for size idx: %d\n",
+ current_size->size_value);
+
+ /* aperture size and gatt addr */
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_GART_BASE,
+ (agp_bridge->gatt_bus_addr & 0xfffff000)
+ | current_size->size_value);
+
+ /* HACK ALERT
+	 * UniNorth seems to be buggy enough not to work properly when
+	 * the AGP aperture isn't mapped at bus physical address 0
+ */
+ agp_bridge->gart_bus_addr = 0;
+#ifdef CONFIG_PPC64
+ /* Assume U3 or later on PPC64 systems */
+ /* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE,
+ (agp_bridge->gatt_bus_addr >> 32) & 0xf);
+#else
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr);
+#endif
+
+ if (is_u3) {
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_GART_DUMMY_PAGE,
+ agp_bridge->scratch_page_real >> 12);
+ }
+
+ return 0;
+}
+
+static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_32(temp)->num_entries;
+
+ if (type != 0 || mem->type != 0)
+ /* We know nothing of memory types */
+ return -EINVAL;
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+
+ while (j < (pg_start + mem->page_count)) {
+ if (agp_bridge->gatt_table[j])
+ return -EBUSY;
+ j++;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ agp_bridge->gatt_table[j] =
+ cpu_to_le32((mem->memory[i] & 0xFFFFF000UL) | 0x1UL);
+ flush_dcache_range((unsigned long)__va(mem->memory[i]),
+ (unsigned long)__va(mem->memory[i])+0x1000);
+ }
+ (void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
+ mb();
+ flush_dcache_range((unsigned long)&agp_bridge->gatt_table[pg_start],
+ (unsigned long)&agp_bridge->gatt_table[pg_start + mem->page_count]);
+
+ uninorth_tlbflush(mem);
+ return 0;
+}
+
+static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, num_entries;
+ void *temp;
+ u32 *gp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_32(temp)->num_entries;
+
+ if (type != 0 || mem->type != 0)
+ /* We know nothing of memory types */
+ return -EINVAL;
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ gp = (u32 *) &agp_bridge->gatt_table[pg_start];
+ for (i = 0; i < mem->page_count; ++i) {
+ if (gp[i]) {
+ printk("u3_insert_memory: entry 0x%x occupied (%x)\n",
+ i, gp[i]);
+ return -EBUSY;
+ }
+ }
+
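+	/* U3 GATT entries hold the physical page number with the valid bit in
+	 * bit 31; UniNorth (above) instead stores a little-endian byte address
+	 * with the valid bit in bit 0. */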
+ for (i = 0; i < mem->page_count; i++) {
+ gp[i] = (mem->memory[i] >> PAGE_SHIFT) | 0x80000000UL;
+ flush_dcache_range((unsigned long)__va(mem->memory[i]),
+ (unsigned long)__va(mem->memory[i])+0x1000);
+ }
+ mb();
+ flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
+ uninorth_tlbflush(mem);
+
+ return 0;
+}
+
+int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ size_t i;
+ u32 *gp;
+
+ if (type != 0 || mem->type != 0)
+ /* We know nothing of memory types */
+ return -EINVAL;
+
+ gp = (u32 *) &agp_bridge->gatt_table[pg_start];
+ for (i = 0; i < mem->page_count; ++i)
+ gp[i] = 0;
+ mb();
+ flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
+ uninorth_tlbflush(mem);
+
+ return 0;
+}
+
+static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ u32 command, scratch, status;
+ int timeout;
+
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_STATUS,
+ &status);
+
+ command = agp_collect_device_status(bridge, mode, status);
+ command |= PCI_AGP_COMMAND_AGP;
+
+ if (uninorth_rev == 0x21) {
+ /*
+		 * Darwin disables AGP 4x on this revision, so we
+		 * assume it's broken. This is an AGP2 controller.
+ */
+ command &= ~AGPSTAT2_4X;
+ }
+
+ if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) {
+ /*
+		 * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
+		 * 2.2 and 2.3; Darwin does so.
+ */
+ if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7)
+ command = (command & ~AGPSTAT_RQ_DEPTH)
+ | (7 << AGPSTAT_RQ_DEPTH_SHIFT);
+ }
+
+ uninorth_tlbflush(NULL);
+
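+	/* the AGP enable bit may not take effect immediately: rewrite the
+	 * command register until it reads back set, giving up after 1000 tries */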
+ timeout = 0;
+ do {
+ pci_write_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ command);
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ &scratch);
+ } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
+ if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
+ printk(KERN_ERR PFX "failed to write UniNorth AGP command reg\n");
+
+ if (uninorth_rev >= 0x30) {
+ /* This is an AGP V3 */
+ agp_device_command(command, (status & AGPSTAT_MODE_3_0));
+ } else {
+ /* AGP V2 */
+ agp_device_command(command, 0);
+ }
+
+ uninorth_tlbflush(NULL);
+}
+
+#ifdef CONFIG_PM
+static int agp_uninorth_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ u32 cmd;
+ u8 agp;
+ struct pci_dev *device = NULL;
+
+ if (state != PMSG_SUSPEND)
+ return 0;
+
+ /* turn off AGP on the video chip, if it was enabled */
+ for_each_pci_dev(device) {
+ /* Don't touch the bridge yet, device first */
+ if (device == pdev)
+ continue;
+		/* Only deal with devices on the same bus here; no Mac has a P2P
+		 * bridge on the AGP port, and mucking around with the entire PCI
+		 * tree is a source of problems on some machines because of a bug
+		 * in some versions of pci_find_capability() when hitting a dead
+		 * device
+ */
+ if (device->bus != pdev->bus)
+ continue;
+ agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+ pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
+ if (!(cmd & PCI_AGP_COMMAND_AGP))
+ continue;
+ printk("uninorth-agp: disabling AGP on device %s\n",
+ pci_name(device));
+ cmd &= ~PCI_AGP_COMMAND_AGP;
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
+ }
+
+ /* turn off AGP on the bridge */
+ agp = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
+ if (cmd & PCI_AGP_COMMAND_AGP) {
+ printk("uninorth-agp: disabling AGP on bridge %s\n",
+ pci_name(pdev));
+ cmd &= ~PCI_AGP_COMMAND_AGP;
+ pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
+ }
+ /* turn off the GART */
+ uninorth_cleanup();
+
+ return 0;
+}
+
+static int agp_uninorth_resume(struct pci_dev *pdev)
+{
+ return 0;
+}
+#endif
+
+static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ char *table;
+ char *table_end;
+ int size;
+ int page_order;
+ int num_entries;
+ int i;
+ void *temp;
+ struct page *page;
+
+ /* We can't handle 2 level gatt's */
+ if (bridge->driver->size_type == LVL2_APER_SIZE)
+ return -EINVAL;
+
+ table = NULL;
+ i = bridge->aperture_size_idx;
+ temp = bridge->current_size;
+ size = page_order = num_entries = 0;
+
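+	/* start from the configured aperture size; if the GATT page allocation
+	 * fails, fall back to the next smaller size in the table */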
+ do {
+ size = A_SIZE_32(temp)->size;
+ page_order = A_SIZE_32(temp)->page_order;
+ num_entries = A_SIZE_32(temp)->num_entries;
+
+ table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+
+ if (table == NULL) {
+ i++;
+ bridge->current_size = A_IDX32(bridge);
+ } else {
+ bridge->aperture_size_idx = i;
+ }
+ } while (!table && (i < bridge->driver->num_aperture_sizes));
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ SetPageReserved(page);
+
+ bridge->gatt_table_real = (u32 *) table;
+ bridge->gatt_table = (u32 *)table;
+ bridge->gatt_bus_addr = virt_to_phys(table);
+
+ for (i = 0; i < num_entries; i++)
+ bridge->gatt_table[i] = 0;
+
+ flush_dcache_range((unsigned long)table, (unsigned long)table_end);
+
+ return 0;
+}
+
+static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ char *table, *table_end;
+ void *temp;
+ struct page *page;
+
+ temp = bridge->current_size;
+ page_order = A_SIZE_32(temp)->page_order;
+
+ /* Do not worry about freeing memory, because if this is
+ * called, then all agp memory is deallocated and removed
+ * from the table.
+ */
+
+ table = (char *) bridge->gatt_table_real;
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_pages((unsigned long) bridge->gatt_table_real, page_order);
+
+ return 0;
+}
+
+void null_cache_flush(void)
+{
+ mb();
+}
+
+/* Setup function */
+
+static struct aper_size_info_32 uninorth_sizes[7] =
+{
+#if 0	/* Not sure uninorth supports such high aperture sizes */
+ {256, 65536, 6, 64},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 16},
+#endif
+ {32, 8192, 3, 8},
+ {16, 4096, 2, 4},
+ {8, 2048, 1, 2},
+ {4, 1024, 0, 1}
+};
+
+/*
+ * Not sure that U3 supports such high aperture sizes, but it
+ * would be strange if it did not :)
+ */
+static struct aper_size_info_32 u3_sizes[8] =
+{
+ {512, 131072, 7, 128},
+ {256, 65536, 6, 64},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 16},
+ {32, 8192, 3, 8},
+ {16, 4096, 2, 4},
+ {8, 2048, 1, 2},
+ {4, 1024, 0, 1}
+};
+
+struct agp_bridge_driver uninorth_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = (void *)uninorth_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .configure = uninorth_configure,
+ .fetch_size = uninorth_fetch_size,
+ .cleanup = uninorth_cleanup,
+ .tlb_flush = uninorth_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .cache_flush = null_cache_flush,
+ .agp_enable = uninorth_agp_enable,
+ .create_gatt_table = uninorth_create_gatt_table,
+ .free_gatt_table = uninorth_free_gatt_table,
+ .insert_memory = uninorth_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .cant_use_aperture = 1,
+};
+
+struct agp_bridge_driver u3_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = (void *)u3_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 8,
+ .configure = uninorth_configure,
+ .fetch_size = uninorth_fetch_size,
+ .cleanup = uninorth_cleanup,
+ .tlb_flush = uninorth_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .cache_flush = null_cache_flush,
+ .agp_enable = uninorth_agp_enable,
+ .create_gatt_table = uninorth_create_gatt_table,
+ .free_gatt_table = uninorth_free_gatt_table,
+ .insert_memory = u3_insert_memory,
+ .remove_memory = u3_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .cant_use_aperture = 1,
+ .needs_scratch_page = 1,
+};
+
+static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP,
+ .chipset_name = "UniNorth",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP_P,
+ .chipset_name = "UniNorth/Pangea",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP15,
+ .chipset_name = "UniNorth 1.5",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP2,
+ .chipset_name = "UniNorth 2",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3_AGP,
+ .chipset_name = "U3",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3L_AGP,
+ .chipset_name = "U3L",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3H_AGP,
+ .chipset_name = "U3H",
+ },
+};
+
+static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = uninorth_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ struct device_node *uninorth_node;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (cap_ptr == 0)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name != NULL; ++j) {
+ if (pdev->device == devs[j].device_id) {
+ printk(KERN_INFO PFX "Detected Apple %s chipset\n",
+ devs[j].chipset_name);
+ goto found;
+ }
+ }
+
+ printk(KERN_ERR PFX "Unsupported Apple chipset (device id: %04x).\n",
+ pdev->device);
+ return -ENODEV;
+
+ found:
+	/* Default the revision to 0 in case we cannot read it. */
+ uninorth_rev = 0;
+ is_u3 = 0;
+ /* Locate core99 Uni-N */
+ uninorth_node = of_find_node_by_name(NULL, "uni-n");
+ /* Locate G5 u3 */
+ if (uninorth_node == NULL) {
+ is_u3 = 1;
+ uninorth_node = of_find_node_by_name(NULL, "u3");
+ }
+ if (uninorth_node) {
+ int *revprop = (int *)
+ get_property(uninorth_node, "device-rev", NULL);
+ if (revprop != NULL)
+ uninorth_rev = *revprop & 0x3f;
+ of_node_put(uninorth_node);
+ }
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ if (is_u3)
+ bridge->driver = &u3_agp_driver;
+ else
+ bridge->driver = &uninorth_agp_driver;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+ bridge->flags = AGP_ERRATA_FASTWRITES;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_uninorth_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_uninorth_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_APPLE,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
+
+static struct pci_driver agp_uninorth_pci_driver = {
+ .name = "agpgart-uninorth",
+ .id_table = agp_uninorth_pci_table,
+ .probe = agp_uninorth_probe,
+ .remove = agp_uninorth_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_uninorth_suspend,
+ .resume = agp_uninorth_resume,
+#endif
+};
+
+static int __init agp_uninorth_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_uninorth_pci_driver);
+}
+
+static void __exit agp_uninorth_cleanup(void)
+{
+ pci_unregister_driver(&agp_uninorth_pci_driver);
+}
+
+module_init(agp_uninorth_init);
+module_exit(agp_uninorth_cleanup);
+
+MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
new file mode 100644
index 00000000000..e1451dd9b6a
--- /dev/null
+++ b/drivers/char/agp/via-agp.c
@@ -0,0 +1,548 @@
+/*
+ * VIA AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+static struct pci_device_id agp_via_pci_table[];
+
+#define VIA_GARTCTRL 0x80
+#define VIA_APSIZE 0x84
+#define VIA_ATTBASE 0x88
+
+#define VIA_AGP3_GARTCTRL 0x90
+#define VIA_AGP3_APSIZE 0x94
+#define VIA_AGP3_ATTBASE 0x98
+#define VIA_AGPSEL 0xfd
+
+static int via_fetch_size(void)
+{
+ int i;
+ u8 temp;
+ struct aper_size_info_8 *values;
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+ pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, &temp);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp);
+ return 0;
+}
+
+
+static int via_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+ current_size->size_value);
+	/* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* GART control register */
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);
+
+ /* attbase - aperture GATT base */
+ pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE,
+ (agp_bridge->gatt_bus_addr & 0xfffff000) | 3);
+ return 0;
+}
+
+
+static void via_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+ previous_size->size_value);
+ /* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
+ * during reinitialization.
+ */
+}
+
+
+static void via_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+
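+	/* bit 7 of VIA_GARTCTRL appears to be the TLB flush trigger: pulse it
+	 * high and then back low */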
+ pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp);
+ temp |= (1<<7);
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+ temp &= ~(1<<7);
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+}
+
+
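+/* { aperture size in MB, number of GATT entries, page order, APSIZE register value } */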
+static struct aper_size_info_8 via_generic_sizes[9] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 128},
+ {64, 16384, 4, 192},
+ {32, 8192, 3, 224},
+ {16, 4096, 2, 240},
+ {8, 2048, 1, 248},
+ {4, 1024, 0, 252},
+ {2, 512, 0, 254},
+ {1, 256, 0, 255}
+};
+
+
+static int via_fetch_size_agp3(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_16 *values;
+
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+ pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp);
+ temp &= 0xfff;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+
+static int via_configure_agp3(void)
+{
+ u32 temp;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+	/* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture GATT base */
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
+ agp_bridge->gatt_bus_addr & 0xfffff000);
+
+	/* 1. Enable GTLB in RX90<7>; all AGP aperture accesses need to fetch
+	 * the translation table first.
+ * 2. Enable AGP aperture in RX91<0>. This bit controls the enabling of the
+ * graphics AGP aperture for the AGP3.0 port.
+ */
+ pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
+ return 0;
+}
+
+
+static void via_cleanup_agp3(void)
+{
+ struct aper_size_info_16 *previous_size;
+
+ previous_size = A_SIZE_16(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value);
+}
+
+
+static void via_tlbflush_agp3(struct agp_memory *mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7));
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp);
+}
+
+
+struct agp_bridge_driver via_agp3_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = agp3_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 10,
+ .configure = via_configure_agp3,
+ .fetch_size = via_fetch_size_agp3,
+ .cleanup = via_cleanup_agp3,
+ .tlb_flush = via_tlbflush_agp3,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+struct agp_bridge_driver via_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = via_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 9,
+ .configure = via_configure,
+ .fetch_size = via_fetch_size,
+ .cleanup = via_cleanup,
+ .tlb_flush = via_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
+
+static struct agp_device_ids via_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C597_0,
+ .chipset_name = "Apollo VP3",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C598_0,
+ .chipset_name = "Apollo MVP3",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8501_0,
+ .chipset_name = "Apollo MVP4",
+ },
+
+ /* VT8601 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8601_0,
+ .chipset_name = "Apollo ProMedia/PLE133Ta",
+ },
+
+ /* VT82C693A / VT28C694T */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C691_0,
+ .chipset_name = "Apollo Pro 133",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8371_0,
+ .chipset_name = "KX133",
+ },
+
+ /* VT8633 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8633_0,
+ .chipset_name = "Pro 266",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_XN266,
+ .chipset_name = "Apollo Pro266",
+ },
+
+ /* VT8361 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8361,
+ .chipset_name = "KLE133",
+ },
+
+ /* VT8365 / VT8362 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8363_0,
+ .chipset_name = "Twister-K/KT133x/KM133",
+ },
+
+ /* VT8753A */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8753_0,
+ .chipset_name = "P4X266",
+ },
+
+ /* VT8366 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8367_0,
+ .chipset_name = "KT266/KY266x/KT333",
+ },
+
+ /* VT8633 (for CuMine/ Celeron) */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8653_0,
+ .chipset_name = "Pro266T",
+ },
+
+ /* KM266 / PM266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_XM266,
+ .chipset_name = "PM266/KM266",
+ },
+
+ /* CLE266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_862X_0,
+ .chipset_name = "CLE266",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8377_0,
+ .chipset_name = "KT400/KT400A/KT600",
+ },
+
+ /* VT8604 / VT8605 / VT8603
+ * (Apollo Pro133A chipset with S3 Savage4) */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8605_0,
+ .chipset_name = "ProSavage PM133/PL133/PN133"
+ },
+
+ /* P4M266x/P4N266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8703_51_0,
+ .chipset_name = "P4M266x/P4N266",
+ },
+
+ /* VT8754 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8754C_0,
+ .chipset_name = "PT800",
+ },
+
+ /* P4X600 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8763_0,
+ .chipset_name = "P4X600"
+ },
+
+ /* KM400 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8378_0,
+ .chipset_name = "KM400/KM400A",
+ },
+
+ /* PT880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_PT880,
+ .chipset_name = "PT880",
+ },
+
+ /* PT890 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8783_0,
+ .chipset_name = "PT890",
+ },
+
+ /* PM800/PN800/PM880/PN880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_PX8X0_0,
+ .chipset_name = "PM800/PN800/PM880/PN880",
+ },
+ /* KT880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_3269_0,
+ .chipset_name = "KT880",
+ },
+ /* KTxxx/Px8xx */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_83_87XX_1,
+ .chipset_name = "VT83xx/VT87xx/KTxxx/Px8xx",
+ },
+ /* P4M800 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_3296_0,
+ .chipset_name = "P4M800",
+ },
+
+ { }, /* dummy final entry, always present */
+};
+
+
+/*
+ * VIA's AGP3 chipsets do magic to make the AGP bridge compliant
+ * with the same standards version as the graphics card.
+ */
+static void check_via_agp3 (struct agp_bridge_data *bridge)
+{
+ u8 reg;
+
+ pci_read_config_byte(bridge->dev, VIA_AGPSEL, &reg);
+ /* Check AGP 2.0 compatibility mode. */
+ if ((reg & (1<<1))==0)
+ bridge->driver = &via_agp3_driver;
+}
+
+
+static int __devinit agp_via_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = via_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ int j = 0;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
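+	/* agp_via_pci_table below is kept in the same order as
+	 * via_agp_device_ids, so the table index selects the chipset name */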
+ j = ent - agp_via_pci_table;
+ printk (KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+ bridge->driver = &via_driver;
+
+ /*
+ * Garg, there are KT400s with KT266 IDs.
+ */
+ if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) {
+ /* Is there a KT400 subsystem ? */
+ if (pdev->subsystem_device == PCI_DEVICE_ID_VIA_8377_0) {
+ printk(KERN_INFO PFX "Found KT400 in disguise as a KT266.\n");
+ check_via_agp3(bridge);
+ }
+ }
+
+	/* If this is an AGP3 bridge, check which mode it's in and adjust. */
+ get_agp_version(bridge);
+ if (bridge->major_version >= 3)
+ check_via_agp3(bridge);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_via_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+
+static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state (pdev);
+ pci_set_power_state (pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int agp_via_resume(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ pci_set_power_state (pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (bridge->driver == &via_agp3_driver)
+ return via_configure_agp3();
+ else if (bridge->driver == &via_driver)
+ return via_configure();
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/* must be the same order as name table above */
+static struct pci_device_id agp_via_pci_table[] = {
+#define ID(x) \
+ { \
+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
+ .class_mask = ~0, \
+ .vendor = PCI_VENDOR_ID_VIA, \
+ .device = x, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ }
+ ID(PCI_DEVICE_ID_VIA_82C597_0),
+ ID(PCI_DEVICE_ID_VIA_82C598_0),
+ ID(PCI_DEVICE_ID_VIA_8501_0),
+ ID(PCI_DEVICE_ID_VIA_8601_0),
+ ID(PCI_DEVICE_ID_VIA_82C691_0),
+ ID(PCI_DEVICE_ID_VIA_8371_0),
+ ID(PCI_DEVICE_ID_VIA_8633_0),
+ ID(PCI_DEVICE_ID_VIA_XN266),
+ ID(PCI_DEVICE_ID_VIA_8361),
+ ID(PCI_DEVICE_ID_VIA_8363_0),
+ ID(PCI_DEVICE_ID_VIA_8753_0),
+ ID(PCI_DEVICE_ID_VIA_8367_0),
+ ID(PCI_DEVICE_ID_VIA_8653_0),
+ ID(PCI_DEVICE_ID_VIA_XM266),
+ ID(PCI_DEVICE_ID_VIA_862X_0),
+ ID(PCI_DEVICE_ID_VIA_8377_0),
+ ID(PCI_DEVICE_ID_VIA_8605_0),
+ ID(PCI_DEVICE_ID_VIA_8703_51_0),
+ ID(PCI_DEVICE_ID_VIA_8754C_0),
+ ID(PCI_DEVICE_ID_VIA_8763_0),
+ ID(PCI_DEVICE_ID_VIA_8378_0),
+ ID(PCI_DEVICE_ID_VIA_PT880),
+ ID(PCI_DEVICE_ID_VIA_8783_0),
+ ID(PCI_DEVICE_ID_VIA_PX8X0_0),
+ ID(PCI_DEVICE_ID_VIA_3269_0),
+ ID(PCI_DEVICE_ID_VIA_83_87XX_1),
+ ID(PCI_DEVICE_ID_VIA_3296_0),
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
+
+
+static struct pci_driver agp_via_pci_driver = {
+ .name = "agpgart-via",
+ .id_table = agp_via_pci_table,
+ .probe = agp_via_probe,
+ .remove = agp_via_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_via_suspend,
+ .resume = agp_via_resume,
+#endif
+};
+
+
+static int __init agp_via_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_via_pci_driver);
+}
+
+static void __exit agp_via_cleanup(void)
+{
+ pci_unregister_driver(&agp_via_pci_driver);
+}
+
+module_init(agp_via_init);
+module_exit(agp_via_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
new file mode 100644
index 00000000000..1dc4259213a
--- /dev/null
+++ b/drivers/char/amiserial.c
@@ -0,0 +1,2179 @@
+/*
+ * linux/drivers/char/amiserial.c
+ *
+ * Serial driver for the amiga builtin port.
+ *
+ * This code was created by taking serial.c version 4.30 from kernel
+ * release 2.3.22, replacing all hardware related stuff with the
+ * corresponding amiga hardware actions, and removing all irrelevant
+ * code. As a consequence, it uses many of the constants and names
+ * associated with the registers and bits of 16550 compatible UARTS -
+ * but only to keep track of status, etc. in the state variables. It
+ * was done this way to make it easier to keep the code in line with
+ * (non hardware specific) changes to serial.c.
+ *
+ * The port is registered with the tty driver as minor device 64, and
+ * therefore other ports should only use 65 upwards.
+ *
+ * Richard Lucock 28/12/99
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997,
+ * 1998, 1999 Theodore Ts'o
+ *
+ */
+
+/*
+ * Serial driver configuration section. Here are the various options:
+ *
+ * SERIAL_PARANOIA_CHECK
+ *		Check the magic number for the async_struct wherever
+ *		possible.
+ */
+
+#include <linux/config.h>
+#include <linux/delay.h>
+
+#undef SERIAL_PARANOIA_CHECK
+#define SERIAL_DO_RESTART
+
+/* Set of debugging defines */
+
+#undef SERIAL_DEBUG_INTR
+#undef SERIAL_DEBUG_OPEN
+#undef SERIAL_DEBUG_FLOW
+#undef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+
+/* Sanity checks */
+
+#define SERIAL_INLINE
+
+#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT)
+#define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \
+ tty->name, (info->flags), serial_driver->refcount,info->count,tty->count,s)
+#else
+#define DBG_CNT(s)
+#endif
+
+/*
+ * End of serial driver configuration section.
+ */
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/serial.h>
+#include <linux/serialP.h>
+#include <linux/serial_reg.h>
+static char *serial_version = "4.30";
+
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/setup.h>
+
+#include <asm/system.h>
+
+#include <asm/irq.h>
+
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+
+#ifdef SERIAL_INLINE
+#define _INLINE_ inline
+#endif
+
+static char *serial_name = "Amiga-builtin serial driver";
+
+static struct tty_driver *serial_driver;
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+static struct async_struct *IRQ_ports;
+
+static unsigned char current_ctl_bits;
+
+static void change_speed(struct async_struct *info, struct termios *old);
+static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
+
+
+static struct serial_state rs_table[1];
+
+#define NR_PORTS (sizeof(rs_table)/sizeof(struct serial_state))
+
+/*
+ * tmp_buf is used as a temporary buffer by serial_write. We need to
+ * lock it in case the copy_from_user blocks while swapping in a page,
+ * and some other program tries to do a serial write at the same time.
+ * Since the lock will only come under contention when the system is
+ * swapping and available memory is low, it makes sense to share one
+ * buffer across all the serial ports, since it significantly saves
+ * memory if large numbers of serial ports are open.
+ */
+static unsigned char *tmp_buf;
+static DECLARE_MUTEX(tmp_buf_sem);
+
+#include <asm/uaccess.h>
+
+#define serial_isroot() (capable(CAP_SYS_ADMIN))
+
+
+static inline int serial_paranoia_check(struct async_struct *info,
+ char *name, const char *routine)
+{
+#ifdef SERIAL_PARANOIA_CHECK
+ static const char *badmagic =
+ "Warning: bad magic number for serial struct (%s) in %s\n";
+ static const char *badinfo =
+ "Warning: null async_struct for (%s) in %s\n";
+
+ if (!info) {
+ printk(badinfo, name, routine);
+ return 1;
+ }
+ if (info->magic != SERIAL_MAGIC) {
+ printk(badmagic, name, routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* some serial hardware definitions */
+#define SDR_OVRUN (1<<15)
+#define SDR_RBF (1<<14)
+#define SDR_TBE (1<<13)
+#define SDR_TSRE (1<<12)
+
+#define SERPER_PARENB (1<<15)
+
+#define AC_SETCLR (1<<15)
+#define AC_UARTBRK (1<<11)
+
+#define SER_DTR (1<<7)
+#define SER_RTS (1<<6)
+#define SER_DCD (1<<5)
+#define SER_CTS (1<<4)
+#define SER_DSR (1<<3)
+
+static __inline__ void rtsdtr_ctrl(int bits)
+{
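+	/* RTS and DTR are driven active-low on CIA-B port A, so invert the
+	 * requested bits before merging them into ciab.pra */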
+ ciab.pra = ((bits & (SER_RTS | SER_DTR)) ^ (SER_RTS | SER_DTR)) | (ciab.pra & ~(SER_RTS | SER_DTR));
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_stop() and rs_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable transmitter interrupts, as necessary.
+ * ------------------------------------------------------------
+ */
+static void rs_stop(struct tty_struct *tty)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_stop"))
+ return;
+
+ local_irq_save(flags);
+ if (info->IER & UART_IER_THRI) {
+ info->IER &= ~UART_IER_THRI;
+ /* disable Tx interrupt and remove any pending interrupts */
+ custom.intena = IF_TBE;
+ mb();
+ custom.intreq = IF_TBE;
+ mb();
+ }
+ local_irq_restore(flags);
+}
+
+static void rs_start(struct tty_struct *tty)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_start"))
+ return;
+
+ local_irq_save(flags);
+ if (info->xmit.head != info->xmit.tail
+ && info->xmit.buf
+ && !(info->IER & UART_IER_THRI)) {
+ info->IER |= UART_IER_THRI;
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * ----------------------------------------------------------------------
+ *
+ * Here start the interrupt handling routines. All of the following
+ * subroutines are declared as inline and are folded into
+ * rs_interrupt(). They were separated out for readability's sake.
+ *
+ * Note: rs_interrupt() is a "fast" interrupt, which means that it
+ * runs with interrupts turned off. People who may want to modify
+ * rs_interrupt() should try to keep the interrupt handler as fast as
+ * possible. After you are done making modifications, it is not a bad
+ * idea to do:
+ *
+ * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
+ *
+ * and look at the resulting assembly code in serial.s.
+ *
+ * - Ted Ts'o (tytso@mit.edu), 7-Mar-93
+ * -----------------------------------------------------------------------
+ */
+
+/*
+ * This routine is used by the interrupt handler to schedule
+ * processing in the software interrupt portion of the driver.
+ */
+static _INLINE_ void rs_sched_event(struct async_struct *info,
+ int event)
+{
+ info->event |= 1 << event;
+ tasklet_schedule(&info->tlet);
+}
+
+static _INLINE_ void receive_chars(struct async_struct *info)
+{
+ int status;
+ int serdatr;
+ struct tty_struct *tty = info->tty;
+ unsigned char ch;
+ struct async_icount *icount;
+
+ icount = &info->state->icount;
+
+ status = UART_LSR_DR; /* We obviously have a character! */
+ serdatr = custom.serdatr;
+ mb();
+ custom.intreq = IF_RBF;
+ mb();
+
+ if((serdatr & 0x1ff) == 0)
+ status |= UART_LSR_BI;
+ if(serdatr & SDR_OVRUN)
+ status |= UART_LSR_OE;
+
+ ch = serdatr & 0xff;
+ if (tty->flip.count >= TTY_FLIPBUF_SIZE)
+ goto ignore_char;
+ *tty->flip.char_buf_ptr = ch;
+ icount->rx++;
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("DR%02x:%02x...", ch, status);
+#endif
+ *tty->flip.flag_buf_ptr = 0;
+
+ /*
+ * We don't handle parity or frame errors - but I have left
+ * the code in, since I'm not sure that the errors can't be
+ * detected.
+ */
+
+ if (status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE)) {
+ /*
+ * For statistics only
+ */
+ if (status & UART_LSR_BI) {
+ status &= ~(UART_LSR_FE | UART_LSR_PE);
+ icount->brk++;
+ } else if (status & UART_LSR_PE)
+ icount->parity++;
+ else if (status & UART_LSR_FE)
+ icount->frame++;
+ if (status & UART_LSR_OE)
+ icount->overrun++;
+
+ /*
+ * Now check to see if character should be
+ * ignored, and mask off conditions which
+ * should be ignored.
+ */
+ if (status & info->ignore_status_mask)
+ goto ignore_char;
+
+ status &= info->read_status_mask;
+
+ if (status & (UART_LSR_BI)) {
+#ifdef SERIAL_DEBUG_INTR
+ printk("handling break....");
+#endif
+ *tty->flip.flag_buf_ptr = TTY_BREAK;
+ if (info->flags & ASYNC_SAK)
+ do_SAK(tty);
+ } else if (status & UART_LSR_PE)
+ *tty->flip.flag_buf_ptr = TTY_PARITY;
+ else if (status & UART_LSR_FE)
+ *tty->flip.flag_buf_ptr = TTY_FRAME;
+ if (status & UART_LSR_OE) {
+ /*
+ * Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ if (tty->flip.count < TTY_FLIPBUF_SIZE) {
+ tty->flip.count++;
+ tty->flip.flag_buf_ptr++;
+ tty->flip.char_buf_ptr++;
+ *tty->flip.flag_buf_ptr = TTY_OVERRUN;
+ }
+ }
+ }
+ tty->flip.flag_buf_ptr++;
+ tty->flip.char_buf_ptr++;
+ tty->flip.count++;
+ ignore_char:
+
+ tty_flip_buffer_push(tty);
+}
+
+static _INLINE_ void transmit_chars(struct async_struct *info)
+{
+ custom.intreq = IF_TBE;
+ mb();
+ if (info->x_char) {
+ custom.serdat = info->x_char | 0x100;
+ mb();
+ info->state->icount.tx++;
+ info->x_char = 0;
+ return;
+ }
+ if (info->xmit.head == info->xmit.tail
+ || info->tty->stopped
+ || info->tty->hw_stopped) {
+ info->IER &= ~UART_IER_THRI;
+ custom.intena = IF_TBE;
+ mb();
+ return;
+ }
+
+ custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
+ mb();
+ info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1);
+ info->state->icount.tx++;
+
+ if (CIRC_CNT(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
+ rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("THRE...");
+#endif
+ if (info->xmit.head == info->xmit.tail) {
+ custom.intena = IF_TBE;
+ mb();
+ info->IER &= ~UART_IER_THRI;
+ }
+}
+
+static _INLINE_ void check_modem_status(struct async_struct *info)
+{
+ unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
+ unsigned char dstatus;
+ struct async_icount *icount;
+
+ /* Determine bits that have changed */
+ dstatus = status ^ current_ctl_bits;
+ current_ctl_bits = status;
+
+ if (dstatus) {
+ icount = &info->state->icount;
+ /* update input line counters */
+ if (dstatus & SER_DSR)
+ icount->dsr++;
+ if (dstatus & SER_DCD) {
+ icount->dcd++;
+#ifdef CONFIG_HARD_PPS
+ if ((info->flags & ASYNC_HARDPPS_CD) &&
+ !(status & SER_DCD))
+ hardpps();
+#endif
+ }
+ if (dstatus & SER_CTS)
+ icount->cts++;
+ wake_up_interruptible(&info->delta_msr_wait);
+ }
+
+ if ((info->flags & ASYNC_CHECK_CD) && (dstatus & SER_DCD)) {
+#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
+ printk("ttyS%d CD now %s...", info->line,
+ (!(status & SER_DCD)) ? "on" : "off");
+#endif
+ if (!(status & SER_DCD))
+ wake_up_interruptible(&info->open_wait);
+ else {
+#ifdef SERIAL_DEBUG_OPEN
+ printk("doing serial hangup...");
+#endif
+ if (info->tty)
+ tty_hangup(info->tty);
+ }
+ }
+ if (info->flags & ASYNC_CTS_FLOW) {
+ if (info->tty->hw_stopped) {
+ if (!(status & SER_CTS)) {
+#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
+ printk("CTS tx start...");
+#endif
+ info->tty->hw_stopped = 0;
+ info->IER |= UART_IER_THRI;
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
+ return;
+ }
+ } else {
+ if ((status & SER_CTS)) {
+#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
+ printk("CTS tx stop...");
+#endif
+ info->tty->hw_stopped = 1;
+ info->IER &= ~UART_IER_THRI;
+ /* disable Tx interrupt and remove any pending interrupts */
+ custom.intena = IF_TBE;
+ mb();
+ custom.intreq = IF_TBE;
+ mb();
+ }
+ }
+ }
+}
+
+static irqreturn_t ser_vbl_int( int irq, void *data, struct pt_regs *regs)
+{
+ /* vbl is just a periodic interrupt we tie into to update modem status */
+ struct async_struct * info = IRQ_ports;
+ /*
+ * TBD - is it better to unregister from this interrupt or to
+ * ignore it if MSI is clear ?
+ */
+ if(info->IER & UART_IER_MSI)
+ check_modem_status(info);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ser_rx_int(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct async_struct * info;
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("ser_rx_int...");
+#endif
+
+ info = IRQ_ports;
+ if (!info || !info->tty)
+ return IRQ_NONE;
+
+ receive_chars(info);
+ info->last_active = jiffies;
+#ifdef SERIAL_DEBUG_INTR
+ printk("end.\n");
+#endif
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ser_tx_int(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct async_struct * info;
+
+ if (custom.serdatr & SDR_TBE) {
+#ifdef SERIAL_DEBUG_INTR
+ printk("ser_tx_int...");
+#endif
+
+ info = IRQ_ports;
+ if (!info || !info->tty)
+ return IRQ_NONE;
+
+ transmit_chars(info);
+ info->last_active = jiffies;
+#ifdef SERIAL_DEBUG_INTR
+ printk("end.\n");
+#endif
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * -------------------------------------------------------------------
+ * Here end the serial interrupt routines.
+ * -------------------------------------------------------------------
+ */
+
+/*
+ * This routine is used to handle the "bottom half" processing for the
+ * serial driver, also known as the "software interrupt" processing.
+ * This processing is done at the kernel interrupt level, after the
+ * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
+ * is where time-consuming activities which cannot be done in the
+ * interrupt driver proper are done; the interrupt driver schedules
+ * them using rs_sched_event(), and they get done here.
+ */
+
+static void do_softint(unsigned long private_)
+{
+ struct async_struct *info = (struct async_struct *) private_;
+ struct tty_struct *tty;
+
+ tty = info->tty;
+ if (!tty)
+ return;
+
+ if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event)) {
+ tty_wakeup(tty);
+ wake_up_interruptible(&tty->write_wait);
+ }
+}
+
+/*
+ * ---------------------------------------------------------------
+ * Low level utility subroutines for the serial driver: routines to
+ * figure out the appropriate timeout for an interrupt chain, routines
+ * to initialize and start up a serial port, and routines to shut down a
+ * serial port. Useful stuff like that.
+ * ---------------------------------------------------------------
+ */
+
+static int startup(struct async_struct * info)
+{
+ unsigned long flags;
+ int retval=0;
+ unsigned long page;
+
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ local_irq_save(flags);
+
+ if (info->flags & ASYNC_INITIALIZED) {
+ free_page(page);
+ goto errout;
+ }
+
+ if (info->xmit.buf)
+ free_page(page);
+ else
+ info->xmit.buf = (unsigned char *) page;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("starting up ttys%d ...", info->line);
+#endif
+
+ /* Clear anything in the input buffer */
+
+ custom.intreq = IF_RBF;
+ mb();
+
+ retval = request_irq(IRQ_AMIGA_VERTB, ser_vbl_int, 0, "serial status", info);
+ if (retval) {
+ if (serial_isroot()) {
+ if (info->tty)
+ set_bit(TTY_IO_ERROR,
+ &info->tty->flags);
+ retval = 0;
+ }
+ goto errout;
+ }
+
+ /* enable both Rx and Tx interrupts */
+ custom.intena = IF_SETCLR | IF_RBF | IF_TBE;
+ mb();
+ info->IER = UART_IER_MSI;
+
+ /* remember current state of the DCD and CTS bits */
+ current_ctl_bits = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
+
+ IRQ_ports = info;
+
+ info->MCR = 0;
+ if (info->tty->termios->c_cflag & CBAUD)
+ info->MCR = SER_DTR | SER_RTS;
+ rtsdtr_ctrl(info->MCR);
+
+ if (info->tty)
+ clear_bit(TTY_IO_ERROR, &info->tty->flags);
+ info->xmit.head = info->xmit.tail = 0;
+
+ /*
+ * Set up the tty->alt_speed kludge
+ */
+ if (info->tty) {
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ info->tty->alt_speed = 57600;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ info->tty->alt_speed = 115200;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
+ info->tty->alt_speed = 230400;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
+ info->tty->alt_speed = 460800;
+ }
+
+ /*
+ * and set the speed of the serial port
+ */
+ change_speed(info, NULL);
+
+ info->flags |= ASYNC_INITIALIZED;
+ local_irq_restore(flags);
+ return 0;
+
+errout:
+ local_irq_restore(flags);
+ return retval;
+}
+
+/*
+ * This routine will shut down a serial port; interrupts are disabled, and
+ * DTR is dropped if the hangup on close termio flag is on.
+ */
+static void shutdown(struct async_struct * info)
+{
+ unsigned long flags;
+ struct serial_state *state;
+
+ if (!(info->flags & ASYNC_INITIALIZED))
+ return;
+
+ state = info->state;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("Shutting down serial port %d ....\n", info->line);
+#endif
+
+ local_irq_save(flags); /* Disable interrupts */
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
+	 * here so the queue might never be woken up
+ */
+ wake_up_interruptible(&info->delta_msr_wait);
+
+ IRQ_ports = NULL;
+
+ /*
+ * Free the IRQ, if necessary
+ */
+ free_irq(IRQ_AMIGA_VERTB, info);
+
+ if (info->xmit.buf) {
+ free_page((unsigned long) info->xmit.buf);
+ info->xmit.buf = NULL;
+ }
+
+ info->IER = 0;
+ custom.intena = IF_RBF | IF_TBE;
+ mb();
+
+ /* disable break condition */
+ custom.adkcon = AC_UARTBRK;
+ mb();
+
+ if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
+ info->MCR &= ~(SER_DTR|SER_RTS);
+ rtsdtr_ctrl(info->MCR);
+
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+
+ info->flags &= ~ASYNC_INITIALIZED;
+ local_irq_restore(flags);
+}
+
+
+/*
+ * This routine is called to set the UART divisor registers to match
+ * the specified baud rate for a serial port.
+ */
+static void change_speed(struct async_struct *info,
+ struct termios *old_termios)
+{
+ int quot = 0, baud_base, baud;
+ unsigned cflag, cval = 0;
+ int bits;
+ unsigned long flags;
+
+ if (!info->tty || !info->tty->termios)
+ return;
+ cflag = info->tty->termios->c_cflag;
+
+ /* Byte size is always 8 bits plus parity bit if requested */
+
+ cval = 3; bits = 10;
+ if (cflag & CSTOPB) {
+ cval |= 0x04;
+ bits++;
+ }
+ if (cflag & PARENB) {
+ cval |= UART_LCR_PARITY;
+ bits++;
+ }
+ if (!(cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+#ifdef CMSPAR
+ if (cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
+#endif
+
+ /* Determine divisor based on baud rate */
+ baud = tty_get_baud_rate(info->tty);
+ if (!baud)
+ baud = 9600; /* B0 transition handled in rs_set_termios */
+ baud_base = info->state->baud_base;
+ if (baud == 38400 &&
+ ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST))
+ quot = info->state->custom_divisor;
+ else {
+ if (baud == 134)
+ /* Special case since 134 is really 134.5 */
+ quot = (2*baud_base / 269);
+ else if (baud)
+ quot = baud_base / baud;
+ }
+ /* If the quotient is zero refuse the change */
+ if (!quot && old_termios) {
+ info->tty->termios->c_cflag &= ~CBAUD;
+ info->tty->termios->c_cflag |= (old_termios->c_cflag & CBAUD);
+ baud = tty_get_baud_rate(info->tty);
+ if (!baud)
+ baud = 9600;
+ if (baud == 38400 &&
+ ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST))
+ quot = info->state->custom_divisor;
+ else {
+ if (baud == 134)
+ /* Special case since 134 is really 134.5 */
+ quot = (2*baud_base / 269);
+ else if (baud)
+ quot = baud_base / baud;
+ }
+ }
+ /* As a last resort, if the quotient is zero, default to 9600 bps */
+ if (!quot)
+ quot = baud_base / 9600;
+ info->quot = quot;
+ info->timeout = ((info->xmit_fifo_size*HZ*bits*quot) / baud_base);
+ info->timeout += HZ/50; /* Add .02 seconds of slop */
+
+ /* CTS flow control flag and modem status interrupts */
+ info->IER &= ~UART_IER_MSI;
+ if (info->flags & ASYNC_HARDPPS_CD)
+ info->IER |= UART_IER_MSI;
+ if (cflag & CRTSCTS) {
+ info->flags |= ASYNC_CTS_FLOW;
+ info->IER |= UART_IER_MSI;
+ } else
+ info->flags &= ~ASYNC_CTS_FLOW;
+ if (cflag & CLOCAL)
+ info->flags &= ~ASYNC_CHECK_CD;
+ else {
+ info->flags |= ASYNC_CHECK_CD;
+ info->IER |= UART_IER_MSI;
+ }
+ /* TBD:
+	 * Does clearing IER_MSI imply that we should disable the VBL interrupt?
+ */
+
+ /*
+ * Set up parity check flag
+ */
+#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+
+ info->read_status_mask = UART_LSR_OE | UART_LSR_DR;
+ if (I_INPCK(info->tty))
+ info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
+ info->read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ info->ignore_status_mask = 0;
+ if (I_IGNPAR(info->tty))
+ info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (I_IGNBRK(info->tty)) {
+ info->ignore_status_mask |= UART_LSR_BI;
+ /*
+		 * If we're ignoring parity and break indicators, ignore
+		 * overruns too (for real raw support).
+ */
+ if (I_IGNPAR(info->tty))
+ info->ignore_status_mask |= UART_LSR_OE;
+ }
+ /*
+ * !!! ignore all characters if CREAD is not set
+ */
+ if ((cflag & CREAD) == 0)
+ info->ignore_status_mask |= UART_LSR_DR;
+ local_irq_save(flags);
+
+ {
+ short serper;
+
+ /* Set up the baud rate */
+ serper = quot - 1;
+
+ /* Enable or disable parity bit */
+
+ if(cval & UART_LCR_PARITY)
+ serper |= (SERPER_PARENB);
+
+ custom.serper = serper;
+ mb();
+ }
+
+ info->LCR = cval; /* Save LCR */
+ local_irq_restore(flags);
+}
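
A minimal user-space sketch of the divisor arithmetic used by change_speed() above. The baud_base value here is a placeholder, not the Amiga's real clock rate (the driver takes it from info->state->baud_base), and the parity bit handling is omitted.

/* Sketch only: illustrates the quot/SERPER computation from change_speed().
 * baud_base below is hypothetical; the driver reads the real value from
 * info->state->baud_base. */
#include <stdio.h>

int main(void)
{
	unsigned int baud_base = 31250;		/* placeholder value */
	unsigned int baud = 9600;
	unsigned int quot;

	if (baud == 134)
		quot = 2 * baud_base / 269;	/* 134 really means 134.5 */
	else if (baud)
		quot = baud_base / baud;
	else
		quot = 0;
	if (!quot)
		quot = baud_base / 9600;	/* last-resort default, as in the driver */

	/* custom.serper holds the divisor minus one */
	printf("baud=%u -> quot=%u, SERPER=%u\n", baud, quot, quot - 1);
	return 0;
}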
+
+static void rs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_put_char"))
+ return;
+
+ if (!tty || !info->xmit.buf)
+ return;
+
+ local_irq_save(flags);
+ if (CIRC_SPACE(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE) == 0) {
+ local_irq_restore(flags);
+ return;
+ }
+
+ info->xmit.buf[info->xmit.head++] = ch;
+ info->xmit.head &= SERIAL_XMIT_SIZE-1;
+ local_irq_restore(flags);
+}
+
+static void rs_flush_chars(struct tty_struct *tty)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
+ return;
+
+ if (info->xmit.head == info->xmit.tail
+ || tty->stopped
+ || tty->hw_stopped
+ || !info->xmit.buf)
+ return;
+
+ local_irq_save(flags);
+ info->IER |= UART_IER_THRI;
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ local_irq_restore(flags);
+}
+
+static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_write"))
+ return 0;
+
+ if (!tty || !info->xmit.buf || !tmp_buf)
+ return 0;
+
+ local_save_flags(flags);
+ local_irq_disable();
+ while (1) {
+ c = CIRC_SPACE_TO_END(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE);
+ if (count < c)
+ c = count;
+ if (c <= 0) {
+ break;
+ }
+ memcpy(info->xmit.buf + info->xmit.head, buf, c);
+ info->xmit.head = ((info->xmit.head + c) &
+ (SERIAL_XMIT_SIZE-1));
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ local_irq_restore(flags);
+
+ if (info->xmit.head != info->xmit.tail
+ && !tty->stopped
+ && !tty->hw_stopped
+ && !(info->IER & UART_IER_THRI)) {
+ info->IER |= UART_IER_THRI;
+ local_irq_disable();
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ local_irq_restore(flags);
+ }
+ return ret;
+}
+
+static int rs_write_room(struct tty_struct *tty)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_write_room"))
+ return 0;
+ return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+}
+
+static int rs_chars_in_buffer(struct tty_struct *tty)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
+ return 0;
+ return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+}
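
The transmit path above treats info->xmit as a power-of-two ring buffer and relies on the kernel's CIRC_CNT/CIRC_SPACE helpers. Below is a self-contained sketch of the same index arithmetic, with local macros mirroring the linux/circ_buf.h definitions (reproduced from memory, so treat them as an approximation).

/* Illustration of the power-of-two ring-buffer arithmetic used by the xmit
 * buffer (head/tail indices masked by SERIAL_XMIT_SIZE-1). */
#include <stdio.h>

#define BUF_SIZE 16	/* must be a power of two, like SERIAL_XMIT_SIZE */

/* bytes available to read (consumer side) */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
/* bytes that can still be written; one slot stays free to tell full from empty */
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int head = 0, tail = 0;
	char buf[BUF_SIZE];

	/* producer: add 5 bytes, wrapping via the mask like rs_put_char() does */
	for (int i = 0; i < 5; i++) {
		buf[head] = 'a' + i;
		head = (head + 1) & (BUF_SIZE - 1);
	}
	printf("cnt=%u space=%u\n",
	       CIRC_CNT(head, tail, BUF_SIZE),
	       CIRC_SPACE(head, tail, BUF_SIZE));	/* cnt=5 space=10 */

	/* consumer: drain 2 bytes, like transmit_chars() advancing tail */
	tail = (tail + 2) & (BUF_SIZE - 1);
	printf("cnt=%u space=%u\n",
	       CIRC_CNT(head, tail, BUF_SIZE),
	       CIRC_SPACE(head, tail, BUF_SIZE));	/* cnt=3 space=12 */
	return 0;
}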
+
+static void rs_flush_buffer(struct tty_struct *tty)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
+ return;
+ local_irq_save(flags);
+ info->xmit.head = info->xmit.tail = 0;
+ local_irq_restore(flags);
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
+}
+
+/*
+ * This function is used to send a high-priority XON/XOFF character to
+ * the device
+ */
+static void rs_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_send_char"))
+ return;
+
+ info->x_char = ch;
+ if (ch) {
+ /* Make sure transmit interrupts are on */
+
+ /* Check this ! */
+ local_irq_save(flags);
+ if(!(custom.intenar & IF_TBE)) {
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ }
+ local_irq_restore(flags);
+
+ info->IER |= UART_IER_THRI;
+ }
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_throttle()
+ *
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ * ------------------------------------------------------------
+ */
+static void rs_throttle(struct tty_struct * tty)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+#ifdef SERIAL_DEBUG_THROTTLE
+ char buf[64];
+
+ printk("throttle %s: %d....\n", tty_name(tty, buf),
+ tty->ldisc.chars_in_buffer(tty));
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "rs_throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ rs_send_xchar(tty, STOP_CHAR(tty));
+
+ if (tty->termios->c_cflag & CRTSCTS)
+ info->MCR &= ~SER_RTS;
+
+ local_irq_save(flags);
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+}
+
+static void rs_unthrottle(struct tty_struct * tty)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+#ifdef SERIAL_DEBUG_THROTTLE
+ char buf[64];
+
+ printk("unthrottle %s: %d....\n", tty_name(tty, buf),
+ tty->ldisc.chars_in_buffer(tty));
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ rs_send_xchar(tty, START_CHAR(tty));
+ }
+ if (tty->termios->c_cflag & CRTSCTS)
+ info->MCR |= SER_RTS;
+ local_irq_save(flags);
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_ioctl() and friends
+ * ------------------------------------------------------------
+ */
+
+static int get_serial_info(struct async_struct * info,
+ struct serial_struct * retinfo)
+{
+ struct serial_struct tmp;
+ struct serial_state *state = info->state;
+
+ if (!retinfo)
+ return -EFAULT;
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.type = state->type;
+ tmp.line = state->line;
+ tmp.port = state->port;
+ tmp.irq = state->irq;
+ tmp.flags = state->flags;
+ tmp.xmit_fifo_size = state->xmit_fifo_size;
+ tmp.baud_base = state->baud_base;
+ tmp.close_delay = state->close_delay;
+ tmp.closing_wait = state->closing_wait;
+ tmp.custom_divisor = state->custom_divisor;
+ if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_serial_info(struct async_struct * info,
+ struct serial_struct * new_info)
+{
+ struct serial_struct new_serial;
+ struct serial_state old_state, *state;
+ unsigned int change_irq,change_port;
+ int retval = 0;
+
+ if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
+ return -EFAULT;
+ state = info->state;
+ old_state = *state;
+
+ change_irq = new_serial.irq != state->irq;
+ change_port = (new_serial.port != state->port);
+ if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size))
+ return -EINVAL;
+
+ if (!serial_isroot()) {
+ if ((new_serial.baud_base != state->baud_base) ||
+ (new_serial.close_delay != state->close_delay) ||
+ (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
+ ((new_serial.flags & ~ASYNC_USR_MASK) !=
+ (state->flags & ~ASYNC_USR_MASK)))
+ return -EPERM;
+ state->flags = ((state->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ info->flags = ((info->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ state->custom_divisor = new_serial.custom_divisor;
+ goto check_and_exit;
+ }
+
+ if (new_serial.baud_base < 9600)
+ return -EINVAL;
+
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+
+ state->baud_base = new_serial.baud_base;
+ state->flags = ((state->flags & ~ASYNC_FLAGS) |
+ (new_serial.flags & ASYNC_FLAGS));
+ info->flags = ((state->flags & ~ASYNC_INTERNAL_FLAGS) |
+ (info->flags & ASYNC_INTERNAL_FLAGS));
+ state->custom_divisor = new_serial.custom_divisor;
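+ /* close_delay and closing_wait are passed in hundredths of a second; convert to jiffies */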
+ state->close_delay = new_serial.close_delay * HZ/100;
+ state->closing_wait = new_serial.closing_wait * HZ/100;
+ info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+check_and_exit:
+ if (info->flags & ASYNC_INITIALIZED) {
+ if (((old_state.flags & ASYNC_SPD_MASK) !=
+ (state->flags & ASYNC_SPD_MASK)) ||
+ (old_state.custom_divisor != state->custom_divisor)) {
+ if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ info->tty->alt_speed = 57600;
+ if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ info->tty->alt_speed = 115200;
+ if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
+ info->tty->alt_speed = 230400;
+ if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
+ info->tty->alt_speed = 460800;
+ change_speed(info, NULL);
+ }
+ } else
+ retval = startup(info);
+ return retval;
+}
+
+
+/*
+ * get_lsr_info - get line status register info
+ *
+ * Purpose: Let user space call ioctl() to find out when the UART is
+ * physically empty. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when the
+ * transmit shift register is empty, not when the transmit holding
+ * register is empty. This functionality allows an RS485 driver to be
+ * written in user space.
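+ *
+ * A user-space caller might use it roughly like this (sketch only;
+ * release_rs485_bus() is a hypothetical helper):
+ *
+ * unsigned int result;
+ * if (ioctl(fd, TIOCSERGETLSR, &result) == 0 && (result & TIOCSER_TEMT))
+ * release_rs485_bus();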
+ */
+static int get_lsr_info(struct async_struct * info, unsigned int *value)
+{
+ unsigned char status;
+ unsigned int result;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ status = custom.serdatr;
+ mb();
+ local_irq_restore(flags);
+ result = ((status & SDR_TSRE) ? TIOCSER_TEMT : 0);
+ if (copy_to_user(value, &result, sizeof(int)))
+ return -EFAULT;
+ return 0;
+}
+
+
+static int rs_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct async_struct * info = (struct async_struct *)tty->driver_data;
+ unsigned char control, status;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ return -ENODEV;
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+
+ control = info->MCR;
+ local_irq_save(flags);
+ status = ciab.pra;
+ local_irq_restore(flags);
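+ /* DCD, DSR and CTS are active-low on the CIA-B port A pins, hence the inversions below */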
+ return ((control & SER_RTS) ? TIOCM_RTS : 0)
+ | ((control & SER_DTR) ? TIOCM_DTR : 0)
+ | (!(status & SER_DCD) ? TIOCM_CAR : 0)
+ | (!(status & SER_DSR) ? TIOCM_DSR : 0)
+ | (!(status & SER_CTS) ? TIOCM_CTS : 0);
+}
+
+static int rs_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct async_struct * info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ return -ENODEV;
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+
+ local_irq_save(flags);
+ if (set & TIOCM_RTS)
+ info->MCR |= SER_RTS;
+ if (set & TIOCM_DTR)
+ info->MCR |= SER_DTR;
+ if (clear & TIOCM_RTS)
+ info->MCR &= ~SER_RTS;
+ if (clear & TIOCM_DTR)
+ info->MCR &= ~SER_DTR;
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*
+ * rs_break() --- routine which turns the break handling on or off
+ */
+static void rs_break(struct tty_struct *tty, int break_state)
+{
+ struct async_struct * info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_break"))
+ return;
+
+ local_irq_save(flags);
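+ /* adkcon is a set/clear register: writing with AC_SETCLR sets UARTBRK (start break), writing without it clears the bit (end break) */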
+ if (break_state == -1)
+ custom.adkcon = AC_SETCLR | AC_UARTBRK;
+ else
+ custom.adkcon = AC_UARTBRK;
+ mb();
+ local_irq_restore(flags);
+}
+
+
+static int rs_ioctl(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct async_struct * info = (struct async_struct *)tty->driver_data;
+ struct async_icount cprev, cnow; /* kernel counter temps */
+ struct serial_icounter_struct icount;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ return -ENODEV;
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
+ (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
+ case TIOCSSERIAL:
+ return set_serial_info(info,
+ (struct serial_struct *) arg);
+ case TIOCSERCONFIG:
+ return 0;
+
+ case TIOCSERGETLSR: /* Get line status register */
+ return get_lsr_info(info, (unsigned int *) arg);
+
+ case TIOCSERGSTRUCT:
+ if (copy_to_user((struct async_struct *) arg,
+ info, sizeof(struct async_struct)))
+ return -EFAULT;
+ return 0;
+
+ /*
+ * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
+ * - mask passed in arg for lines of interest
+ * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
+ * Caller should use TIOCGICOUNT to see which one it was
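+ * e.g. ioctl(fd, TIOCMIWAIT, TIOCM_CD) sleeps until DCD changes
+ * (user-space sketch; any |'ed combination of the masks above works)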
+ */
+ case TIOCMIWAIT:
+ local_irq_save(flags);
+ /* note the counters on entry */
+ cprev = info->state->icount;
+ local_irq_restore(flags);
+ while (1) {
+ interruptible_sleep_on(&info->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ local_irq_save(flags);
+ cnow = info->state->icount; /* atomic copy */
+ local_irq_restore(flags);
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+ return -EIO; /* no change => error */
+ if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
+ return 0;
+ }
+ cprev = cnow;
+ }
+ /* NOTREACHED */
+
+ /*
+ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+ * Return: write counters to the user passed counter struct
+ * NB: both 1->0 and 0->1 transitions are counted except for
+ * RI where only 0->1 is counted.
+ */
+ case TIOCGICOUNT:
+ local_irq_save(flags);
+ cnow = info->state->icount;
+ local_irq_restore(flags);
+ icount.cts = cnow.cts;
+ icount.dsr = cnow.dsr;
+ icount.rng = cnow.rng;
+ icount.dcd = cnow.dcd;
+ icount.rx = cnow.rx;
+ icount.tx = cnow.tx;
+ icount.frame = cnow.frame;
+ icount.overrun = cnow.overrun;
+ icount.parity = cnow.parity;
+ icount.brk = cnow.brk;
+ icount.buf_overrun = cnow.buf_overrun;
+
+ if (copy_to_user((void *)arg, &icount, sizeof(icount)))
+ return -EFAULT;
+ return 0;
+ case TIOCSERGWILD:
+ case TIOCSERSWILD:
+ /* "setserial -W" is called in Debian boot */
+ printk ("TIOCSER?WILD ioctl obsolete, ignored.\n");
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static void rs_set_termios(struct tty_struct *tty, struct termios *old_termios)
+{
+ struct async_struct *info = (struct async_struct *)tty->driver_data;
+ unsigned long flags;
+ unsigned int cflag = tty->termios->c_cflag;
+
+ if ( (cflag == old_termios->c_cflag)
+ && ( RELEVANT_IFLAG(tty->termios->c_iflag)
+ == RELEVANT_IFLAG(old_termios->c_iflag)))
+ return;
+
+ change_speed(info, old_termios);
+
+ /* Handle transition to B0 status */
+ if ((old_termios->c_cflag & CBAUD) &&
+ !(cflag & CBAUD)) {
+ info->MCR &= ~(SER_DTR|SER_RTS);
+ local_irq_save(flags);
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) &&
+ (cflag & CBAUD)) {
+ info->MCR |= SER_DTR;
+ if (!(tty->termios->c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
+ info->MCR |= SER_RTS;
+ }
+ local_irq_save(flags);
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ rs_start(tty);
+ }
+
+#if 0
+ /*
+ * No need to wake up processes in open wait, since they
+ * sample the CLOCAL flag once, and don't recheck it.
+ * XXX It's not clear whether the current behavior is correct
+ * or not. Hence, this may change.....
+ */
+ if (!(old_termios->c_cflag & CLOCAL) &&
+ (tty->termios->c_cflag & CLOCAL))
+ wake_up_interruptible(&info->open_wait);
+#endif
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_close()
+ *
+ * This routine is called when the serial port gets closed. First, we
+ * wait for the last remaining data to be sent. Then, we unlink its
+ * async structure from the interrupt chain if necessary, and we free
+ * that IRQ if nothing is left in the chain.
+ * ------------------------------------------------------------
+ */
+static void rs_close(struct tty_struct *tty, struct file * filp)
+{
+ struct async_struct * info = (struct async_struct *)tty->driver_data;
+ struct serial_state *state;
+ unsigned long flags;
+
+ if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
+ return;
+
+ state = info->state;
+
+ local_irq_save(flags);
+
+ if (tty_hung_up_p(filp)) {
+ DBG_CNT("before DEC-hung");
+ local_irq_restore(flags);
+ return;
+ }
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_close ttys%d, count = %d\n", info->line, state->count);
+#endif
+ if ((tty->count == 1) && (state->count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. state->count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shut down.
+ */
+ printk("rs_close: bad serial port count; tty->count is 1, "
+ "state->count is %d\n", state->count);
+ state->count = 1;
+ }
+ if (--state->count < 0) {
+ printk("rs_close: bad serial port count for ttys%d: %d\n",
+ info->line, state->count);
+ state->count = 0;
+ }
+ if (state->count) {
+ DBG_CNT("before DEC-2");
+ local_irq_restore(flags);
+ return;
+ }
+ info->flags |= ASYNC_CLOSING;
+ local_irq_restore(flags);
+ /*
+ * Now we wait for the transmit buffer to clear; and we notify
+ * the line discipline to only process XON/XOFF characters.
+ */
+ tty->closing = 1;
+ if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
+ tty_wait_until_sent(tty, info->closing_wait);
+ /*
+ * At this point we stop accepting input. To do this, we
+ * disable the receive line status interrupts, and tell the
+ * interrupt driver to stop checking the data ready bit in the
+ * line status register.
+ */
+ info->read_status_mask &= ~UART_LSR_DR;
+ if (info->flags & ASYNC_INITIALIZED) {
+ /* disable receive interrupts */
+ custom.intena = IF_RBF;
+ mb();
+ /* clear any pending receive interrupt */
+ custom.intreq = IF_RBF;
+ mb();
+
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important if there is a transmit FIFO!
+ */
+ rs_wait_until_sent(tty, info->timeout);
+ }
+ shutdown(info);
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+
+ tty_ldisc_flush(tty);
+ tty->closing = 0;
+ info->event = 0;
+ info->tty = NULL;
+ if (info->blocked_open) {
+ if (info->close_delay) {
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
+ }
+ wake_up_interruptible(&info->open_wait);
+ }
+ info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
+ wake_up_interruptible(&info->close_wait);
+}
+
+/*
+ * rs_wait_until_sent() --- wait until the transmitter is empty
+ */
+static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct async_struct * info = (struct async_struct *)tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+ int lsr;
+
+ if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
+ return;
+
+ if (info->xmit_fifo_size == 0)
+ return; /* Just in case.... */
+
+ orig_jiffies = jiffies;
+ /*
+ * Set the check interval to be 1/5 of the estimated time to
+ * send a single character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ *
+ * Note: we have to use pretty tight timings here to satisfy
+ * the NIST-PCTS.
+ */
+ char_time = (info->timeout - HZ/50) / info->xmit_fifo_size;
+ char_time = char_time / 5;
+ if (char_time == 0)
+ char_time = 1;
+ if (timeout)
+ char_time = min_t(unsigned long, char_time, timeout);
+ /*
+ * If the transmitter hasn't cleared in twice the approximate
+ * amount of time to send the entire FIFO, it probably won't
+ * ever clear. This assumes the UART isn't doing flow
+ * control, which is currently the case. Hence, if it ever
+ * takes longer than info->timeout, this is probably due to a
+ * UART bug of some kind. So, we clamp the timeout parameter at
+ * 2*info->timeout.
+ */
+ if (!timeout || timeout > 2*info->timeout)
+ timeout = 2*info->timeout;
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("In rs_wait_until_sent(%d) check=%lu...", timeout, char_time);
+ printk("jiff=%lu...", jiffies);
+#endif
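+ /* poll the Transmit Shift Register Empty bit in serdatr until the shifter drains */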
+ while(!((lsr = custom.serdatr) & SDR_TSRE)) {
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("serdatr = %d (jiff=%lu)...", lsr, jiffies);
+#endif
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ current->state = TASK_RUNNING;
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
+#endif
+}
+
+/*
+ * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+static void rs_hangup(struct tty_struct *tty)
+{
+ struct async_struct * info = (struct async_struct *)tty->driver_data;
+ struct serial_state *state = info->state;
+
+ if (serial_paranoia_check(info, tty->name, "rs_hangup"))
+ return;
+
+ rs_flush_buffer(tty);
+ shutdown(info);
+ info->event = 0;
+ state->count = 0;
+ info->flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->tty = NULL;
+ wake_up_interruptible(&info->open_wait);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_open() and friends
+ * ------------------------------------------------------------
+ */
+static int block_til_ready(struct tty_struct *tty, struct file * filp,
+ struct async_struct *info)
+{
+#ifdef DECLARE_WAITQUEUE
+ DECLARE_WAITQUEUE(wait, current);
+#else
+ struct wait_queue wait = { current, NULL };
+#endif
+ struct serial_state *state = info->state;
+ int retval;
+ int do_clocal = 0, extra_count = 0;
+ unsigned long flags;
+
+ /*
+ * If the device is in the middle of being closed, then block
+ * until it's done, and then try again.
+ */
+ if (tty_hung_up_p(filp) ||
+ (info->flags & ASYNC_CLOSING)) {
+ if (info->flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->close_wait);
+#ifdef SERIAL_DO_RESTART
+ return ((info->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+#else
+ return -EAGAIN;
+#endif
+ }
+
+ /*
+ * If non-blocking mode is set, or the port is not enabled,
+ * then make the check up front and then exit.
+ */
+ if ((filp->f_flags & O_NONBLOCK) ||
+ (tty->flags & (1 << TTY_IO_ERROR))) {
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = 1;
+
+ /*
+ * Block waiting for the carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, state->count is dropped by one, so that
+ * rs_close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+ retval = 0;
+ add_wait_queue(&info->open_wait, &wait);
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready before block: ttys%d, count = %d\n",
+ state->line, state->count);
+#endif
+ local_irq_save(flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = 1;
+ state->count--;
+ }
+ local_irq_restore(flags);
+ info->blocked_open++;
+ while (1) {
+ local_irq_save(flags);
+ if (tty->termios->c_cflag & CBAUD)
+ rtsdtr_ctrl(SER_DTR|SER_RTS);
+ local_irq_restore(flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (tty_hung_up_p(filp) ||
+ !(info->flags & ASYNC_INITIALIZED)) {
+#ifdef SERIAL_DO_RESTART
+ if (info->flags & ASYNC_HUP_NOTIFY)
+ retval = -EAGAIN;
+ else
+ retval = -ERESTARTSYS;
+#else
+ retval = -EAGAIN;
+#endif
+ break;
+ }
+ if (!(info->flags & ASYNC_CLOSING) &&
+ (do_clocal || (!(ciab.pra & SER_DCD)) ))
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready blocking: ttys%d, count = %d\n",
+ info->line, state->count);
+#endif
+ schedule();
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&info->open_wait, &wait);
+ if (extra_count)
+ state->count++;
+ info->blocked_open--;
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready after blocking: ttys%d, count = %d\n",
+ info->line, state->count);
+#endif
+ if (retval)
+ return retval;
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+}
+
+static int get_async_struct(int line, struct async_struct **ret_info)
+{
+ struct async_struct *info;
+ struct serial_state *sstate;
+
+ sstate = rs_table + line;
+ sstate->count++;
+ if (sstate->info) {
+ *ret_info = sstate->info;
+ return 0;
+ }
+ info = kmalloc(sizeof(struct async_struct), GFP_KERNEL);
+ if (!info) {
+ sstate->count--;
+ return -ENOMEM;
+ }
+ memset(info, 0, sizeof(struct async_struct));
+#ifdef DECLARE_WAITQUEUE
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
+ init_waitqueue_head(&info->delta_msr_wait);
+#endif
+ info->magic = SERIAL_MAGIC;
+ info->port = sstate->port;
+ info->flags = sstate->flags;
+ info->xmit_fifo_size = sstate->xmit_fifo_size;
+ info->line = line;
+ tasklet_init(&info->tlet, do_softint, (unsigned long)info);
+ info->state = sstate;
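+ /* re-check: another opener may have installed an async_struct while we were allocating */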
+ if (sstate->info) {
+ kfree(info);
+ *ret_info = sstate->info;
+ return 0;
+ }
+ *ret_info = sstate->info = info;
+ return 0;
+}
+
+/*
+ * This routine is called whenever a serial port is opened. It
+ * enables interrupts for the serial port, linking its async structure
+ * into the IRQ chain. It also performs the serial-specific
+ * initialization for the tty structure.
+ */
+static int rs_open(struct tty_struct *tty, struct file * filp)
+{
+ struct async_struct *info;
+ int retval, line;
+ unsigned long page;
+
+ line = tty->index;
+ if ((line < 0) || (line >= NR_PORTS)) {
+ return -ENODEV;
+ }
+ retval = get_async_struct(line, &info);
+ if (retval) {
+ return retval;
+ }
+ tty->driver_data = info;
+ info->tty = tty;
+ if (serial_paranoia_check(info, tty->name, "rs_open"))
+ return -ENODEV;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open %s, count = %d\n", tty->name, info->state->count);
+#endif
+ info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
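+ /* lazily allocate the shared tmp_buf page; re-check after the (possibly sleeping) allocation and drop the spare page if another open beat us to it */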
+ if (!tmp_buf) {
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page) {
+ return -ENOMEM;
+ }
+ if (tmp_buf)
+ free_page(page);
+ else
+ tmp_buf = (unsigned char *) page;
+ }
+
+ /*
+ * If the port is in the middle of closing, bail out now
+ */
+ if (tty_hung_up_p(filp) ||
+ (info->flags & ASYNC_CLOSING)) {
+ if (info->flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->close_wait);
+#ifdef SERIAL_DO_RESTART
+ return ((info->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+#else
+ return -EAGAIN;
+#endif
+ }
+
+ /*
+ * Start up serial port
+ */
+ retval = startup(info);
+ if (retval) {
+ return retval;
+ }
+
+ retval = block_til_ready(tty, filp, info);
+ if (retval) {
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open returning after block_til_ready with %d\n",
+ retval);
+#endif
+ return retval;
+ }
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open %s successful...", tty->name);
+#endif
+ return 0;
+}
+
+/*
+ * /proc fs routines....
+ */
+
+static inline int line_info(char *buf, struct serial_state *state)
+{
+ struct async_struct *info = state->info, scr_info;
+ char stat_buf[30], control, status;
+ int ret;
+ unsigned long flags;
+
+ ret = sprintf(buf, "%d: uart:amiga_builtin",state->line);
+
+ /*
+ * Figure out the current RS-232 lines
+ */
+ if (!info) {
+ info = &scr_info; /* This is just for serial_{in,out} */
+
+ info->magic = SERIAL_MAGIC;
+ info->flags = state->flags;
+ info->quot = 0;
+ info->tty = NULL;
+ }
+ local_irq_save(flags);
+ status = ciab.pra;
+ control = (info != &scr_info) ? info->MCR : status;
+ local_irq_restore(flags);
+
+ stat_buf[0] = 0;
+ stat_buf[1] = 0;
+ if(!(control & SER_RTS))
+ strcat(stat_buf, "|RTS");
+ if(!(status & SER_CTS))
+ strcat(stat_buf, "|CTS");