author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-17 08:26:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-17 08:26:17 -0800
commit     3c2e81ef344a90bb0a39d84af6878b4aeff568a2 (patch)
tree       bd8c8b23466174899d2fe4d35af6e1e838edb068
parent     221392c3ad0432e39fd74a349364f66cb0ed78f6 (diff)
parent     55bde6b1442fed8af67b92d21acce67db454c9f9 (diff)
download   vexpress-lsk-3c2e81ef344a90bb0a39d84af6878b4aeff568a2.tar.gz
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull DRM updates from Dave Airlie:
 "This is the one and only next pull for 3.8, we had a regression we
  found last week, so I was waiting for that to resolve itself, and I
  ended up with some Intel fixes on top as well.

  Highlights:
   - new driver: nvidia tegra 20/30/hdmi support
   - radeon: add support for previously unused DMA engines, more HDMI
     regs, eviction speed-ups and fixes
   - i915: HSW support enable, agp removal on GEN6, seqno wrapping
   - exynos: IPP subsystem support (image post proc), HDMI
   - nouveau: display class reworking, nv20->40 z compression
   - ttm: start of locking fixes, rcu usage for lookups
   - core: documentation updates, docbook integration, monotonic clock
     usage, move from connector to object properties"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (590 commits)
  drm/exynos: add gsc ipp driver
  drm/exynos: add rotator ipp driver
  drm/exynos: add fimc ipp driver
  drm/exynos: add iommu support for ipp
  drm/exynos: add ipp subsystem
  drm/exynos: support device tree for fimd
  radeon: fix regression with eviction since evict caching changes
  drm/radeon: add more pedantic checks in the CP DMA checker
  drm/radeon: bump version for CS ioctl support for async DMA
  drm/radeon: enable the async DMA rings in the CS ioctl
  drm/radeon: add VM CS parser support for async DMA on cayman/TN/SI
  drm/radeon/kms: add evergreen/cayman CS parser for async DMA (v2)
  drm/radeon/kms: add 6xx/7xx CS parser for async DMA (v2)
  drm/radeon: fix htile buffer size computation for command stream checker
  drm/radeon: fix fence locking in the pageflip callback
  drm/radeon: make indirect register access concurrency-safe
  drm/radeon: add W|RREG32_IDX for MM_INDEX|DATA based mmio accesss
  drm/exynos: support extended screen coordinate of fimd
  drm/exynos: fix x, y coordinates for right bottom pixel
  drm/exynos: fix fb offset calculation for plane
  ...
-rw-r--r--  Documentation/DMA-attributes.txt | 9
-rw-r--r--  Documentation/DocBook/drm.tmpl | 39
-rw-r--r--  Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt | 191
-rw-r--r--  Documentation/kref.txt | 88
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/arm/mm/dma-mapping.c | 41
-rw-r--r--  drivers/char/agp/intel-agp.h | 91
-rw-r--r--  drivers/char/agp/intel-gtt.c | 320
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 13
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 63
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 161
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c (renamed from drivers/gpu/drm/drm_dp_i2c_helper.c) | 146
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 48
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 76
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 38
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 120
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 8
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 2
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 37
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 30
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 115
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 150
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 117
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 43
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 94
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 74
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2001
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.h | 37
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 200
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 495
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 435
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1870
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.h | 38
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 59
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 150
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 85
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 2060
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 266
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 855
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.h | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 324
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 376
-rw-r--r--  drivers/gpu/drm/exynos/regs-fimc.h | 669
-rw-r--r--  drivers/gpu/drm/exynos/regs-gsc.h | 284
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/regs-rotator.h | 73
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail.h | 6
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 365
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 24
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 136
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 472
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 292
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 420
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 308
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 763
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 62
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 1091
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1945
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 961
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 123
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 135
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 227
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 90
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 511
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 250
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 128
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 101
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 21
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/engctx.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/falcon.c | 247
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/gpuobj.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/mm.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nva3.c | 124
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c | 167
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nve0.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 1144
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | 142
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | 109
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva0.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | 111
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 884
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c | 190
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c | 104
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 122
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv04.c | 184
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv10.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/regs.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c | 107
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv04.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv10.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nv84.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 225
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/engctx.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/falcon.h | 81
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/gpuobj.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/mm.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/object.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/parent.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/bsp.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/copy.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/crypt.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/disp.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/ppp.h | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/vp.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 63
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/disp.c | 178
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dp.c | 182
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/base.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv10.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv20.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv30.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv40.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv50.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nve0.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/base.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c | 89
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c | 81
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c | 131
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c | 114
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c | 79
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c | 84
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | 393
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 235
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 65
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 141
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hdmi.c | 261
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 764
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 321
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 2547
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 403
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 120
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 530
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 2141
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 149
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 218
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 739
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 131
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 357
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 86
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 480
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 357
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 86
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 198
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 122
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 71
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 355
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 119
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 23
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 7
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 834
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 388
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 115
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 234
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 56
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 1334
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.h | 575
-rw-r--r--  drivers/gpu/drm/tegra/host1x.c | 325
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 272
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 228
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 321
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 51
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | 909
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 274
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 92
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 153
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 917
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2019
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 84
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 893
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 6
-rw-r--r--  include/drm/drmP.h | 3
-rw-r--r--  include/drm/drm_crtc.h | 19
-rw-r--r--  include/drm/drm_crtc_helper.h | 3
-rw-r--r--  include/drm/drm_dp_helper.h | 39
-rw-r--r--  include/drm/drm_hashtab.h | 14
-rw-r--r--  include/drm/exynos_drm.h | 26
-rw-r--r--  include/drm/intel-gtt.h | 7
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 33
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 45
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 3
-rw-r--r--  include/drm/ttm/ttm_memory.h | 2
-rw-r--r--  include/drm/ttm/ttm_object.h | 4
-rw-r--r--  include/linux/dma-attrs.h | 1
-rw-r--r--  include/linux/kref.h | 21
-rw-r--r--  include/uapi/drm/drm.h | 1
-rw-r--r--  include/uapi/drm/exynos_drm.h | 203
-rw-r--r--  include/uapi/drm/i915_drm.h | 6
-rw-r--r--  include/uapi/drm/radeon_drm.h | 6
364 files changed, 38931 insertions(+), 14876 deletions(-)
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index f50309081ac..e59480db9ee 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
dma_unmap_{single,page,sg} functions family to force buffer to stay in
device domain after releasing a mapping for it. Use this attribute with
care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default the DMA-mapping subsystem is allowed to assemble the buffer
+allocated by the dma_alloc_attrs() function from individual pages, as
+long as it can be mapped as a contiguous chunk into the device's DMA
+address space. By specifying this attribute, the allocated buffer is
+forced to be contiguous in physical memory as well.
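
For illustration (not part of the patch): a minimal sketch of a caller that
forces a physically contiguous allocation with this attribute. The device
pointer, size and surrounding error handling are hypothetical, and the same
attrs must be passed to dma_free_attrs() when the buffer is released.

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

static void *alloc_contig_buffer(struct device *dev, size_t size,
				 dma_addr_t *dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	/* Request a buffer that is contiguous in physical memory, too. */
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
}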
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index b0300529ab1..4ee2304f82f 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1141,23 +1141,13 @@ int max_width, max_height;</synopsis>
the <methodname>page_flip</methodname> operation will be called with a
non-NULL <parameter>event</parameter> argument pointing to a
<structname>drm_pending_vblank_event</structname> instance. Upon page
- flip completion the driver must fill the
- <parameter>event</parameter>::<structfield>event</structfield>
- <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
- and <structfield>tv_usec</structfield> fields with the associated
- vertical blanking count and timestamp, add the event to the
- <parameter>drm_file</parameter> list of events to be signaled, and wake
- up any waiting process. This can be performed with
+ flip completion the driver must call <methodname>drm_send_vblank_event</methodname>
+ to fill in the event and send it, waking up any waiting processes.
+ This can be performed with
<programlisting><![CDATA[
- struct timeval now;
-
- event->event.sequence = drm_vblank_count_and_time(..., &now);
- event->event.tv_sec = now.tv_sec;
- event->event.tv_usec = now.tv_usec;
-
spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&event->base.link, &event->base.file_priv->event_list);
- wake_up_interruptible(&event->base.file_priv->event_wait);
+ ...
+ drm_send_vblank_event(dev, pipe, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
]]></programlisting>
</para>
@@ -1621,10 +1611,10 @@ void intel_crt_init(struct drm_device *dev)
</sect2>
</sect1>
- <!-- Internals: mid-layer helper functions -->
+ <!-- Internals: kms helper functions -->
<sect1>
- <title>Mid-layer Helper Functions</title>
+ <title>Mode Setting Helper Functions</title>
<para>
The CRTC, encoder and connector functions provided by the drivers
implement the DRM API. They're called by the DRM core and ioctl handlers
@@ -2106,6 +2096,21 @@ void intel_crt_init(struct drm_device *dev)
</listitem>
</itemizedlist>
</sect2>
+ <sect2>
+ <title>Modeset Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_crtc_helper.c
+ </sect2>
+ <sect2>
+ <title>fbdev Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
+!Edrivers/gpu/drm/drm_fb_helper.c
+ </sect2>
+ <sect2>
+ <title>Display Port Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
+!Iinclude/drm/drm_dp_helper.h
+!Edrivers/gpu/drm/drm_dp_helper.c
+ </sect2>
</sect1>
<!-- Internals: vertical blanking -->
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
new file mode 100644
index 00000000000..b4fa934ae3a
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -0,0 +1,191 @@
+NVIDIA Tegra host1x
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-host1x"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt outputs from the controller.
+- #address-cells: The number of cells used to represent physical base addresses
+ in the host1x address space. Should be 1.
+- #size-cells: The number of cells used to represent the size of an address
+ range in the host1x address space. Should be 1.
+- ranges: The mapping of the host1x address space to the CPU address space.
+
+The host1x top-level node defines a number of children, each representing one
+of the following host1x client modules:
+
+- mpe: video encoder
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-mpe"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- vi: video input
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-vi"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- epp: encoder pre-processor
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-epp"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- isp: image signal processor
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-isp"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- gr2d: 2D graphics engine
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-gr2d"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- gr3d: 3D graphics engine
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-gr3d"
+ - reg: Physical base address and length of the controller's registers.
+
+- dc: display controller
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-dc"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+ Each display controller node has a child node, named "rgb", that represents
+ the RGB output associated with the controller. It can take the following
+ optional properties:
+ - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+ - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+ - nvidia,edid: supplies a binary EDID blob
+
+- hdmi: High Definition Multimedia Interface
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-hdmi"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+ - vdd-supply: regulator for supply voltage
+ - pll-supply: regulator for PLL
+
+ Optional properties:
+ - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+ - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+ - nvidia,edid: supplies a binary EDID blob
+
+- tvo: TV encoder output
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-tvo"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- dsi: display serial interface
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-dsi"
+ - reg: Physical base address and length of the controller's registers.
+
+Example:
+
+/ {
+ ...
+
+ host1x {
+ compatible = "nvidia,tegra20-host1x", "simple-bus";
+ reg = <0x50000000 0x00024000>;
+ interrupts = <0 65 0x04 /* mpcore syncpt */
+ 0 67 0x04>; /* mpcore general */
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x54000000 0x54000000 0x04000000>;
+
+ mpe {
+ compatible = "nvidia,tegra20-mpe";
+ reg = <0x54040000 0x00040000>;
+ interrupts = <0 68 0x04>;
+ };
+
+ vi {
+ compatible = "nvidia,tegra20-vi";
+ reg = <0x54080000 0x00040000>;
+ interrupts = <0 69 0x04>;
+ };
+
+ epp {
+ compatible = "nvidia,tegra20-epp";
+ reg = <0x540c0000 0x00040000>;
+ interrupts = <0 70 0x04>;
+ };
+
+ isp {
+ compatible = "nvidia,tegra20-isp";
+ reg = <0x54100000 0x00040000>;
+ interrupts = <0 71 0x04>;
+ };
+
+ gr2d {
+ compatible = "nvidia,tegra20-gr2d";
+ reg = <0x54140000 0x00040000>;
+ interrupts = <0 72 0x04>;
+ };
+
+ gr3d {
+ compatible = "nvidia,tegra20-gr3d";
+ reg = <0x54180000 0x00040000>;
+ };
+
+ dc@54200000 {
+ compatible = "nvidia,tegra20-dc";
+ reg = <0x54200000 0x00040000>;
+ interrupts = <0 73 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ dc@54240000 {
+ compatible = "nvidia,tegra20-dc";
+ reg = <0x54240000 0x00040000>;
+ interrupts = <0 74 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ hdmi {
+ compatible = "nvidia,tegra20-hdmi";
+ reg = <0x54280000 0x00040000>;
+ interrupts = <0 75 0x04>;
+ status = "disabled";
+ };
+
+ tvo {
+ compatible = "nvidia,tegra20-tvo";
+ reg = <0x542c0000 0x00040000>;
+ interrupts = <0 76 0x04>;
+ status = "disabled";
+ };
+
+ dsi {
+ compatible = "nvidia,tegra20-dsi";
+ reg = <0x54300000 0x00040000>;
+ status = "disabled";
+ };
+ };
+
+ ...
+};
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index 48ba715d5a6..ddf85a5dde0 100644
--- a/Documentation/kref.txt
+++ b/Documentation/kref.txt
@@ -213,3 +213,91 @@ presentation on krefs, which can be found at:
and:
http://www.kroah.com/linux/talks/ols_2004_kref_talk/
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+ struct my_data *entry = NULL;
+ mutex_lock(&mutex);
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ mutex_unlock(&mutex);
+ return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del(&entry->link);
+ mutex_unlock(&mutex);
+ kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+ kref_put(&entry->refcount, release_entry);
+}
+
+This is useful for removing the mutex lock around kref_put() in put_entry(),
+but it's important that kref_get_unless_zero is enclosed in the same critical
+section that finds the entry in the lookup table; otherwise
+kref_get_unless_zero may reference already freed memory.
+Note that it is illegal to use kref_get_unless_zero without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
+
+The function kref_get_unless_zero also makes it possible to use RCU
+locking for lookups in the above example:
+
+struct my_data
+{
+ struct rcu_head rhead;
+ .
+ struct kref refcount;
+ .
+ .
+};
+
+static struct my_data *get_entry_rcu()
+{
+ struct my_data *entry = NULL;
+ rcu_read_lock();
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ rcu_read_unlock();
+ return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del_rcu(&entry->link);
+ mutex_unlock(&mutex);
+ kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+ kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for an
+RCU grace period after release_entry_rcu has been called. That can be
+accomplished by using kfree_rcu(entry, rhead) as done above, or by calling
+synchronize_rcu() before using kfree(), but note that synchronize_rcu() may
+sleep for a substantial amount of time.
+
+
+Thomas Hellstrom <thellstrom@vmware.com>
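
For context, the lookup examples above pair with a producer that initializes
the refcount before publishing the entry. A minimal sketch, reusing the same
hypothetical mutex and list head q from the documentation's examples:

static void add_entry(struct my_data *entry)
{
	kref_init(&entry->refcount);	/* refcount starts at 1 */

	mutex_lock(&mutex);
	list_add_rcu(&entry->link, &q);
	mutex_unlock(&mutex);
}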
diff --git a/MAINTAINERS b/MAINTAINERS
index d9c31b906ac..6892b26025b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2549,6 +2549,15 @@ S: Supported
F: drivers/gpu/drm/exynos
F: include/drm/exynos*
+DRM DRIVERS FOR NVIDIA TEGRA
+M: Thierry Reding <thierry.reding@avionic-design.de>
+L: dri-devel@lists.freedesktop.org
+L: linux-tegra@vger.kernel.org
+T: git git://gitorious.org/thierryreding/linux.git
+S: Maintained
+F: drivers/gpu/drm/tegra/
+F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+
DSCC4 DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5383bc01857..6b2fb87c869 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1034,7 +1034,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
spin_unlock_irqrestore(&mapping->lock, flags);
}
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, struct dma_attrs *attrs)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
@@ -1048,6 +1049,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
if (!pages)
return NULL;
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+ {
+ unsigned long order = get_order(size);
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ goto error;
+
+ __dma_clear_buffer(page, size);
+
+ for (i = 0; i < count; i++)
+ pages[i] = page + i;
+
+ return pages;
+ }
+
while (count) {
int j, order = __fls(count);
@@ -1081,14 +1099,21 @@ error:
return NULL;
}
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+ size_t size, struct dma_attrs *attrs)
{
int count = size >> PAGE_SHIFT;
int array_size = count * sizeof(struct page *);
int i;
- for (i = 0; i < count; i++)
- if (pages[i])
- __free_pages(pages[i], 0);
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ dma_release_from_contiguous(dev, pages[0], count);
+ } else {
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ }
+
if (array_size <= PAGE_SIZE)
kfree(pages);
else
@@ -1250,7 +1275,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (gfp & GFP_ATOMIC)
return __iommu_alloc_atomic(dev, size, handle);
- pages = __iommu_alloc_buffer(dev, size, gfp);
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
if (!pages)
return NULL;
@@ -1271,7 +1296,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
err_mapping:
__iommu_remove_mapping(dev, *handle, size);
err_buffer:
- __iommu_free_buffer(dev, pages, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
return NULL;
}
@@ -1327,7 +1352,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
__iommu_remove_mapping(dev, handle, size);
- __iommu_free_buffer(dev, pages, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6ec0fff79bc..1042c1b9037 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -62,12 +62,6 @@
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED 0x00000002
-#define HSW_PTE_UNCACHED 0x00000000
-#define GEN6_PTE_LLC 0x00000004
-#define GEN6_PTE_LLC_MLC 0x00000006
-#define GEN6_PTE_GFDT 0x00000008
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
@@ -97,7 +91,6 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -148,29 +141,6 @@
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
-#define SNB_GTT_SIZE_0M (0 << 8)
-#define SNB_GTT_SIZE_1M (1 << 8)
-#define SNB_GTT_SIZE_2M (2 << 8)
-#define SNB_GTT_SIZE_MASK (3 << 8)
-
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
@@ -219,66 +189,5 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 38390f7c6ab..dbd901e94ea 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
stolen_size = 0;
break;
}
- } else if (INTEL_GTT_GEN == 6) {
- /*
- * SandyBridge has new memory control reg at 0x50.w
- */
- u16 snb_gmch_ctl;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- case SNB_GMCH_GMS_STOLEN_32M:
- stolen_size = MB(32);
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- stolen_size = MB(64);
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- stolen_size = MB(96);
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- stolen_size = MB(128);
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- stolen_size = MB(160);
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- stolen_size = MB(192);
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- stolen_size = MB(224);
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- stolen_size = MB(256);
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- stolen_size = MB(288);
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- stolen_size = MB(320);
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- stolen_size = MB(352);
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- stolen_size = MB(384);
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- stolen_size = MB(416);
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- stolen_size = MB(448);
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- stolen_size = MB(480);
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- stolen_size = MB(512);
- break;
- }
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
static unsigned int intel_gtt_total_entries(void)
{
- int size;
-
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
return i965_gtt_total_entries();
- else if (INTEL_GTT_GEN == 6) {
- u16 snb_gmch_ctl;
-
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- size = MB(0);
- break;
- case SNB_GTT_SIZE_1M:
- size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- size = MB(2);
- break;
- }
- return size/4;
- } else {
+ else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
{
u8 __iomem *reg;
- if (INTEL_GTT_GEN >= 6)
- return true;
-
if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static bool gen6_check_flags(unsigned int flags)
-{
- return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else {
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-
- writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
- u32 reg_addr;
+ u32 reg_addr, gtt_addr;
int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
- if (INTEL_GTT_GEN >= 7)
- size = MB(2);
-
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
- if (INTEL_GTT_GEN == 3) {
- u32 gtt_addr;
-
+ switch (INTEL_GTT_GEN) {
+ case 3:
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
- } else {
- u32 gtt_offset;
-
- switch (INTEL_GTT_GEN) {
- case 5:
- case 6:
- case 7:
- gtt_offset = MB(2);
- break;
- case 4:
- default:
- gtt_offset = KB(512);
- break;
- }
- intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ break;
+ case 5:
+ intel_private.gtt_bus_addr = reg_addr + MB(2);
+ break;
+ default:
+ intel_private.gtt_bus_addr = reg_addr + KB(512);
+ break;
}
if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = gen6_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = haswell_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
- .gen = 7,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = valleyview_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
-};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", &ironlake_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
- "ValleyView", &valleyview_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 18321b68b88..983201b450f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2ff5cefe9ea..6f58c81cfcb 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) += shmobile/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 1a026ac2dfb..3602731a611 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
static int ast_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
ttm_bo_type_device, &astbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, ast_bo_ttm_destroy);
if (ret)
return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 101e423c899..dcd1a8c029e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
};
-static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
kfree(ap);
+
+ return 0;
}
static int __devinit
cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- cirrus_kick_out_firmware_fb(pdev);
+ int ret;
+
+ ret = cirrus_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bc83f835c83..1413a26e490 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static int cirrus_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
ttm_bo_type_device, &cirrusbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, cirrus_bo_ttm_destroy);
if (ret)
return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ef1b22144d3..f2d667b8bee 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.edid_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
- if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return -EINVAL;
+ }
+
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-void drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
/* Delete edid, when there is none. */
if (!edid) {
connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
return ret;
}
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
+ if (!connector->edid_blob_ptr)
+ return -EINVAL;
- ret = drm_connector_property_set_value(connector,
+ ret = drm_object_property_set_value(&connector->base,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ /* Only the driver knows */
+ return true;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* store the property value if successful */
if (!ret)
- drm_connector_property_set_value(connector, property, value);
+ drm_object_property_set_value(&connector->base, property, value);
return ret;
}
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ connector->status = connector_status_unknown;
+
if (connector->funcs->reset)
connector->funcs->reset(connector);
+ }
}
EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1227adf74db..7b2d378b257 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,35 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front of the
+ * connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list);
+ }
+
+ list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
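A minimal usage sketch, not part of the patch: a driver using the crtc helpers would typically call this once at the end of its output setup, after all connectors are registered. foo_modeset_init() is a hypothetical driver function.

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

static int foo_modeset_init(struct drm_device *dev)
{
	/* ... create crtcs, encoders and connectors here ... */

	/* make the LVDS/eDP panel the first connector userspace sees */
	drm_helper_move_panel_connectors_to_head(dev);

	return 0;
}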
+
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them. Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
+ * Based on the helper callbacks implemented by @connector, try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
*
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
- *
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
*
* RETURNS:
* Number of modes found on @connector.
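A sketch of the typical wiring, assuming driver-specific foo_detect() and foo_destroy() implementations: this helper is meant to be plugged into the connector's ->fill_modes() hook.

static enum drm_connector_status foo_detect(struct drm_connector *connector,
					    bool force);
static void foo_destroy(struct drm_connector *connector);

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = foo_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = foo_destroy,
};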
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
- drm_kms_helper_poll_enable(dev);
}
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
}
/**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
- * @x: width of mode
- * @y: height of mode
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on an hdmi link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/**
* drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
*
* RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
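A hedged wiring sketch (foo_crtc_destroy() is hypothetical): drivers built on the crtc helpers point ->set_config of their drm_crtc_funcs at this function.

static void foo_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	/* a real driver would also free its crtc container here */
}

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = foo_crtc_destroy,
};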
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
*
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
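A sketch of how a driver that does its own connector re-detection could reuse the new helper; struct foo_device and foo_hotplug_work_func() are hypothetical.

#include <linux/workqueue.h>

struct foo_device {
	struct drm_device *dev;
	struct work_struct hotplug_work;
};

static void foo_hotplug_work_func(struct work_struct *work)
{
	struct foo_device *foo = container_of(work, struct foo_device,
					      hotplug_work);

	/* ... driver-specific re-detection of connector state ... */

	/* sends the uevent and, if set, calls ->output_poll_changed() */
	drm_kms_helper_hotplug_event(foo->dev);
}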
+
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- /* if this is HPD or polled don't check it -
- TV out for instance */
- if (!connector->polled)
+ /* Ignore forced connectors. */
+ if (connector->force)
continue;
- else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
- repoll = true;
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
- !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);
- if (changed) {
- /* send a uevent + call fbdev */
- drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
- }
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
return;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled)
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool changed = false;
+
if (!dev->mode_config.poll_enabled)
return;
- /* kill timer and schedule immediate execution, this doesn't block */
- cancel_delayed_work(&dev->mode_config.output_poll_work);
- if (drm_kms_helper_poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
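Since the reworked helper takes the mode_config mutex and calls ->detect(), it must run in process context; a sketch using a threaded irq handler (names hypothetical):

#include <linux/interrupt.h>

static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
{
	struct drm_device *dev = arg;

	/* re-detects all DRM_CONNECTOR_POLL_HPD connectors and sends a
	 * hotplug event only if some connector changed status */
	drm_helper_hpd_irq_event(dev);

	return IRQ_HANDLED;
}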
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7f246f21245..89e19662716 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
-
+
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
-
}
static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
return 0;
}
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
-
+
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
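A registration sketch under the assumption that the driver supplies the low-level foo_aux_ch() transfer hook; the field names follow the i2c_algo_dp_aux_data structure used above.

static int foo_aux_ch(struct i2c_adapter *adapter, int mode,
		      uint8_t write_byte, uint8_t *read_byte);

static struct i2c_algo_dp_aux_data foo_dp_algo = {
	.running = false,
	.address = 0,
	.aux_ch = foo_aux_ch,
};

static struct i2c_adapter foo_dp_adapter;

static int foo_dp_i2c_init(struct device *parent)
{
	foo_dp_adapter.owner = THIS_MODULE;
	foo_dp_adapter.class = I2C_CLASS_DDC;
	strlcpy(foo_dp_adapter.name, "foo dp aux", sizeof(foo_dp_adapter.name));
	foo_dp_adapter.algo_data = &foo_dp_algo;
	foo_dp_adapter.dev.parent = parent;

	return i2c_dp_aux_add_bus(&foo_dp_adapter);
}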
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(100);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(400);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
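A hedged sketch of a clock-recovery loop combining these helpers; foo_read_link_status() and foo_set_drive_settings() stand in for the driver's DPCD accessors and are hypothetical.

static int foo_read_link_status(u8 link_status[DP_LINK_STATUS_SIZE]);
static void foo_set_drive_settings(u8 voltage, u8 pre_emphasis);

static bool foo_link_train_clock_recovery(u8 dpcd[DP_RECEIVER_CAP_SIZE],
					  int lane_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 voltage, pre_emphasis;
	int tries;

	for (tries = 0; tries < 5; tries++) {
		/* wait the interval the sink requests in its DPCD */
		drm_dp_link_train_clock_recovery_delay(dpcd);

		if (foo_read_link_status(link_status) < 0)
			return false;

		if (drm_dp_clock_recovery_ok(link_status, lane_count))
			return true;

		/* adopt the drive settings the sink requested for lane 0 */
		voltage = drm_dp_get_adjust_request_voltage(link_status, 0);
		pre_emphasis = drm_dp_get_adjust_request_pre_emphasis(link_status, 0);
		foo_set_drive_settings(voltage, pre_emphasis);
	}

	return false;
}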
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
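For example, a driver can translate the sink's advertised maximum into a link clock in kHz; DP_MAX_LINK_RATE holds a bw code such as DP_LINK_BW_2_7:

static int foo_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
}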
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fadcd44ff19..5a3770fbd77 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
- int i;
- u32 *raw_edid = (u32 *)in_edid;
+ if (memchr_inv(in_edid, 0, length))
+ return false;
- for (i = 0; i < length / 4; i++)
- if (*(raw_edid + i) != 0)
- return false;
return true;
}
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+/*
+ * Looks for a CEA mode matching the given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+ struct drm_display_mode *cea_mode;
+ u8 mode;
+
+ for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+
+ if (drm_mode_equal(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
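A usage sketch: an hdmi encoder can use the returned 1-based VIC both to decide whether a mode is a CEA-861 mode at all and to fill the AVI infoframe (foo_avi_vic() is hypothetical).

static u8 foo_avi_vic(struct drm_display_mode *adjusted_mode)
{
	/* 0 means "not a CEA-861 mode", otherwise the 1-based VIC */
	return drm_match_cea_mode(adjusted_mode);
}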
+
+
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_LOG_KMS("HDMI: DVI dual %d, "
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, 0 in case it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+ uint8_t i;
+
+ for (i = 0; i < drm_num_cea_modes; i++)
+ if (drm_mode_equal(mode, &edid_cea_modes[i]))
+ return i + 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4d58d7e6af3..954d175bd7f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,8 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
if (mode->force) {
const char *s;
switch (mode->force) {
- case DRM_FORCE_OFF: s = "OFF"; break;
- case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
default:
- case DRM_FORCE_ON: s = "ON"; break;
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
}
DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
if (panic_timeout < 0)
return 0;
- printk(KERN_ERR "panic occurred, switching back to text console\n");
+ pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: unregistered panic notifier\n");
+ pr_info("drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != sizes.surface_bpp) {
+ if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- }
+
/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
/* set the fb pointer */
- for (i = 0; i < fb_helper->crtc_count; i++) {
+ for (i = 0; i < fb_helper->crtc_count; i++)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- }
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0) {
+ if (register_framebuffer(info) < 0)
return -EINVAL;
- }
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
} else {
drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: registered panic notifier\n");
+ dev_info(fb_helper->dev->dev, "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
- if (strict) {
+ if (strict)
enable = connector->status == connector_status_connected;
- } else {
+ else
enable = connector->status != connector_status_disconnected;
- }
+
return enable;
}
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
continue;
- }
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
+ if (!crtcs || !modes || !enabled) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto out;
+ }
+
drm_enable_connectors(fb_helper, enabled);
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
}
}
+out:
kfree(crtcs);
kfree(modes);
kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/**
* drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
*
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
/*
* we shouldn't end up with no modes here.
*/
- if (count == 0) {
- printk(KERN_INFO "No connectors reported connected with modes\n");
- }
+ if (count == 0)
+ dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
drm_setup_crtcs(fb_helper);
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
- * probing all the outputs attached to the fb.
+ * probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
* LOCKING:
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c3745c4d46d..80254547a3f 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return list;
if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+ unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ if (entry->key == key)
+ return list;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = list;
}
if (parent) {
- hlist_add_after(parent, &item->head);
+ hlist_add_after_rcu(parent, &item->head);
} else {
- hlist_add_head(&item->head, h_list);
+ hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;
- list = drm_ht_find_key(ht, key);
+ list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
- hlist_del_init(list);
+ hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
- hlist_del_init(&item->head);
+ hlist_del_init_rcu(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
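With the rcu list variants used internally, a read-side lookup only needs rcu_read_lock() instead of a hash table lock, provided updaters still serialize among themselves and the found object's lifetime is guaranteed by other means. A hedged sketch:

#include <linux/rcupdate.h>

static struct drm_hash_item *foo_lookup_rcu(struct drm_open_hash *ht,
					    unsigned long key)
{
	struct drm_hash_item *item = NULL;

	rcu_read_lock();
	if (drm_ht_find_item(ht, key, &item) != 0)
		item = NULL;
	rcu_read_unlock();

	return item;
}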
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 23dd97506f2..e77bd8b57df 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
break;
+ case DRM_CAP_TIMESTAMP_MONOTONIC:
+ req->value = drm_timestamp_monotonic;
+ break;
default:
return -EINVAL;
}
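Userspace can probe the new capability through libdrm's drmGetCap(); a minimal sketch:

#include <stdint.h>
#include <xf86drm.h>

static int foo_has_monotonic_timestamps(int fd)
{
	uint64_t cap = 0;

	/* drmGetCap() returns 0 on success; cap is 1 for CLOCK_MONOTONIC */
	if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap) != 0)
		return 0;

	return cap == 1;
}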
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3a3d0ce891b..19c01ca3cc7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
s64 diff_ns;
int vblrc;
struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
do {
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
unsigned flags,
struct drm_crtc *refcrtc)
{
- struct timeval stime, raw_time;
+ ktime_t stime, etime, mono_time_offset;
+ struct timeval tv_etime;
struct drm_display_mode *mode;
int vbl_status, vtotal, vdisplay;
int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
preempt_disable();
/* Get system timestamp before query. */
- do_gettimeofday(&stime);
+ stime = ktime_get();
/* Get vertical and horizontal scanout pos. vpos, hpos. */
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
/* Get system timestamp after query. */
- do_gettimeofday(&raw_time);
+ etime = ktime_get();
+ if (!drm_timestamp_monotonic)
+ mono_time_offset = ktime_get_monotonic_offset();
preempt_enable();
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
- duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status |= 0x8;
}
+ if (!drm_timestamp_monotonic)
+ etime = ktime_sub(etime, mono_time_offset);
+
+ /* save this only for debugging purposes */
+ tv_etime = ktime_to_timeval(etime);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+ etime = ktime_sub_ns(etime, delta_ns);
+ *vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
crtc, (int)vbl_status, hpos, vpos,
- (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+ (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
(int)duration_ns/1000, i);
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+static struct timeval get_drm_timestamp(void)
+{
+ ktime_t now;
+
+ now = ktime_get();
+ if (!drm_timestamp_monotonic)
+ now = ktime_sub(now, ktime_get_monotonic_offset());
+
+ return ktime_to_timeval(now);
+}
+
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
}
/* GPU high precision timestamp query unsupported or failed.
- * Return gettimeofday timestamp as best estimate.
+ * Return current monotonic/gettimeofday timestamp as best estimate.
*/
- do_gettimeofday(tvblank);
+ *tvblank = get_drm_timestamp();
return 0;
}
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ e->event.sequence = seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+ unsigned int seq;
+ if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ now = get_drm_timestamp();
+ }
+ send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
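A sketch of the intended pageflip-completion usage, holding the event lock as the kernel-doc requires (foo_complete_pageflip() is hypothetical):

static void foo_complete_pageflip(struct drm_device *dev, int pipe,
				  struct drm_pending_vblank_event *e)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	/* fills in sequence/timestamp and wakes the waiting file */
	drm_send_vblank_event(dev, pipe, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}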
+
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * This function acquires the event lock itself; callers must not hold it.
+ */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
-
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
+ spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ send_vblank_event(dev, e, seq, &now);
vblwait->reply.sequence = seq;
- trace_drm_vblank_event_delivered(current->pid, pipe,
- vblwait->request.sequence);
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
DRM_DEBUG("vblank event on %d, current %d\n",
e->event.sequence, seq);
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 59450f39bf9..d8da30e90db 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
*
* Describe @mode using DRM_DEBUG.
*/
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
"0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
* RETURNS:
* @mode->hdisplay
*/
-int drm_mode_width(struct drm_display_mode *mode)
+int drm_mode_width(const struct drm_display_mode *mode)
{
return mode->hdisplay;
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
* RETURNS:
* @mode->vdisplay
*/
-int drm_mode_height(struct drm_display_mode *mode)
+int drm_mode_height(const struct drm_display_mode *mode)
{
return mode->vdisplay;
}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* RETURNS:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
* the same */
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index ba33144257e..754bc96e10c 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
- u32 lnkcap, lnkcap2;
+ u32 lnkcap = 0, lnkcap2 = 0;
*mask = 0;
if (!dev->pdev)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c236fd27eba..200e104f1fa 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
+/*
+ * Default to using monotonic timestamps for wait-for-vblank and page-flip
+ * completion events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
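Usage note: with the parameter wired up, the old gettimeofday-based behaviour can be restored at boot, e.g.:

	drm.timestamp_monotonic=0

on the kernel command line; userspace can tell which clock is in use via the DRM_CAP_TIMESTAMP_MONOTONIC capability added in drm_ioctl.c above.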
struct idr drm_minors_idr;
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
if (!file_priv->master)
return -EINVAL;
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
+ if (file_priv->minor->master)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
}
- mutex_unlock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
+ kfree(dev->devname);
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062a..02296653a05 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;
- ret = drm_connector_property_get_value(connector,
+ ret = drm_object_property_get_value(&connector->base,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb0..1d1f1e5e33f 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_IOMMU
+ bool "EXYNOS DRM IOMMU Support"
+ depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ help
+ Choose this option if you want to use the IOMMU feature for DRM.
+
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+ bool "Exynos DRM IPP"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use the IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+ bool "Exynos DRM FIMC"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+ bool "Exynos DRM Rotator"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+ bool "Exynos DRM GSC"
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ help
+ Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a..639b49e1ec0 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1..bef43e0342a 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
- .of_match_table = hdmiddc_match_types,
+ .of_match_table = of_match_ptr(hdmiddc_match_types),
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b322..9601bad47a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,89 +33,64 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
- dma_addr_t start_addr;
- unsigned int npages, i = 0;
- struct scatterlist *sgl;
int ret = 0;
+ enum dma_attr attr;
+ unsigned int nr_pages;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return -EINVAL;
- }
-
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
- if (buf->size >= SZ_1M) {
- npages = buf->size >> SECTION_SHIFT;
- buf->page_size = SECTION_SIZE;
- } else if (buf->size >= SZ_64K) {
- npages = buf->size >> 16;
- buf->page_size = SZ_64K;
- } else {
- npages = buf->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
- }
+ init_dma_attrs(&buf->dma_attrs);
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- return -ENOMEM;
- }
+ /*
+ * If EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
+ * region will be allocated; otherwise the allocation will only be
+ * as physically contiguous as possible.
+ */
+ if (flags & EXYNOS_BO_CONTIG)
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- kfree(buf->sgt);
- buf->sgt = NULL;
- return -ENOMEM;
- }
+ /*
+ * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a writecombine
+ * mapping, otherwise a cachable one.
+ */
+ if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
- buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
- if (!buf->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err1;
- }
+ dma_set_attr(attr, &buf->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
- buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+ buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err2;
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
}
- sgl = buf->sgt->sgl;
- start_addr = buf->dma_addr;
-
- while (i < npages) {
- buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
- sg_dma_address(sgl) = start_addr;
- start_addr += buf->page_size;
- sgl = sg_next(sgl);
- i++;
+ nr_pages = buf->size >> PAGE_SHIFT;
+ buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to get sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_attrs;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
return ret;
-err2:
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
-err1:
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
return ret;
}
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- /*
- * release only physically continuous memory and
- * non-continuous memory would be released by exynos
- * gem framework.
- */
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return;
- }
-
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
- kfree(buf->pages);
- buf->pages = NULL;
-
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba..25cf1628503 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
-/* release physical memory region, sgt and pages. */
+/* release physical memory region and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4..2efa4b031d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
+ spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
+ spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
crtc->fb = old_fb;
+
+ spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ spin_unlock_irq(&dev->event_lock);
goto out;
}
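
Taking dev->event_lock around the list manipulation matters because the completion side runs from the vblank interrupt handler. A hedged sketch of the matching irq-side pattern; sketch_finish_pageflip() and event_list are hypothetical, modeled on the exynos pageflip handler:

	#include <drm/drmP.h>

	/* sketch: irq-side consumer of the pageflip event list. the
	 * producer above uses spin_lock_irq(); the handler, already in
	 * irq context, uses the irqsave variant. */
	static void sketch_finish_pageflip(struct drm_device *dev,
					   struct list_head *event_list)
	{
		struct drm_pending_vblank_event *e, *t;
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		list_for_each_entry_safe(e, t, event_list, base.link) {
			list_del(&e->base.link);
			/* hand the event back to userspace here */
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
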
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886..61d5a8402eb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,70 +30,108 @@
#include <linux/dma-buf.h>
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
- unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
{
- struct sg_table *sgt = NULL;
- struct scatterlist *sgl;
- int i, ret;
+ struct exynos_drm_dmabuf_attachment *exynos_attach;
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- goto out;
+ exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+ if (!exynos_attach)
+ return -ENOMEM;
- ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
- if (ret)
- goto err_free_sgt;
+ exynos_attach->dir = DMA_NONE;
+ attach->priv = exynos_attach;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
+ return 0;
+}
- for_each_sg(sgt->sgl, sgl, nr_pages, i)
- sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+ struct sg_table *sgt;
- return sgt;
+ if (!exynos_attach)
+ return;
-err_free_sgt:
- kfree(sgt);
- sgt = NULL;
-out:
- return NULL;
+ sgt = &exynos_attach->sgt;
+
+ if (exynos_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ exynos_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(exynos_attach);
+ attach->priv = NULL;
}
static struct sg_table *
exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
+ struct scatterlist *rd, *wr;
struct sg_table *sgt = NULL;
- unsigned int npages;
- int nents;
+ unsigned int i;
+ int nents, ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
- mutex_lock(&dev->struct_mutex);
+ if (WARN_ON(dir == DMA_NONE))
+ return ERR_PTR(-EINVAL);
+
+ /* just return current sgt if already requested. */
+ if (exynos_attach->dir == dir)
+ return &exynos_attach->sgt;
+
+ /* reattaching is not allowed. */
+ if (WARN_ON(exynos_attach->dir != DMA_NONE))
+ return ERR_PTR(-EBUSY);
buf = gem_obj->buffer;
+ if (!buf) {
+ DRM_ERROR("buffer is null.\n");
+ return ERR_PTR(-ENOMEM);
+ }
- /* there should always be pages allocated. */
- if (!buf->pages) {
- DRM_ERROR("pages is null.\n");
- goto err_unlock;
+ sgt = &exynos_attach->sgt;
+
+ ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
}
- npages = buf->size / buf->page_size;
+ mutex_lock(&dev->struct_mutex);
- sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
- if (!sgt) {
- DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+ rd = buf->sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sgt = ERR_PTR(-EIO);
goto err_unlock;
}
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
- npages, buf->size, buf->page_size);
+ exynos_attach->dir = dir;
+ attach->priv = exynos_attach;
+
+ DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
- sgt = NULL;
+ /* Nothing to do. */
}
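
From the importer's side, the cached-attachment scheme above means repeated map calls in the same direction are cheap, and the real unmap is deferred to detach. A minimal usage sketch with the standard dma-buf API; sketch_import() is hypothetical:

	#include <linux/dma-buf.h>

	/* sketch: a second map_attachment call in the same direction is
	 * served from exynos_attach->sgt without touching the IOMMU. */
	static int sketch_import(struct dma_buf *dmabuf, struct device *dev)
	{
		struct dma_buf_attachment *attach;
		struct sg_table *sgt;

		attach = dma_buf_attach(dmabuf, dev);
		if (IS_ERR(attach))
			return PTR_ERR(attach);

		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			dma_buf_detach(dmabuf, attach);
			return PTR_ERR(sgt);
		}

		/* ... use sgt ... */

		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		dma_buf_detach(dmabuf, attach);	/* real unmap happens here */
		return 0;
	}
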
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
}
static struct dma_buf_ops exynos_dmabuf_ops = {
+ .attach = exynos_gem_attach_dma_buf,
+ .detach = exynos_gem_detach_dma_buf,
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
.kmap = exynos_gem_dmabuf_kmap,
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
- struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
- buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
- if (!buffer->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err_free_pages;
+ goto err_free_buffer;
}
sgl = sgt->sgl;
- if (sgt->nents == 1) {
- buffer->dma_addr = sg_dma_address(sgt->sgl);
- buffer->size = sg_dma_len(sgt->sgl);
+ buffer->size = dma_buf->size;
+ buffer->dma_addr = sg_dma_address(sgl);
+ if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
- unsigned int i = 0;
-
- buffer->dma_addr = sg_dma_address(sgl);
- while (i < sgt->nents) {
- buffer->pages[i] = sg_page(sgl);
- buffer->size += sg_dma_len(sgl);
- sgl = sg_next(sgl);
- i++;
- }
-
+ /*
+	 * this case could be CONTIG or NONCONTIG type but for now
+	 * it is set to NONCONTIG.
+	 * TODO: find a way for the exporter to notify the importer
+	 * of its own buffer type.
+ */
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
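
The nents == 1 test works because dma_map_sg() only ever yields a single entry when the exporter handed over one contiguous DMA region. As a sanity-check sketch (sketch_sgt_covers() is hypothetical), the mapped entries should still cover the advertised dma-buf size, which is what the removed sg_dma_len() accumulation used to compute:

	#include <linux/scatterlist.h>
	#include <linux/dma-buf.h>

	/* sketch: verify the mapped entries cover the whole dma-buf;
	 * the import path above trusts dma_buf->size instead. */
	static bool sketch_sgt_covers(struct sg_table *sgt,
				      struct dma_buf *dmabuf)
	{
		struct scatterlist *sgl;
		size_t total = 0;
		int i;

		for_each_sg(sgt->sgl, sgl, sgt->nents, i)
			total += sg_dma_len(sgl);

		return total >= dmabuf->size;
	}
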
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
-err_free_pages:
- kfree(buffer->pages);
- buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd..e0a8e8024b0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,8 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -49,6 +51,9 @@
#define VBLANK_OFF_DELAY 50000
+/* platform device pointer for exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
+ /*
+	 * create a mapping to manage the iommu table and set a pointer
+	 * to the iommu mapping structure in iommu_mapping of the private
+	 * data. this iommu_mapping can also be used to check whether
+	 * iommu is supported or not.
+ */
+ ret = drm_create_iommu_mapping(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to create iommu mapping.\n");
+ goto err_crtc;
+ }
+
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ err_drm_device:
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+ drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
+
+ drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+ exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+ exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+ exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+ exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
if (ret < 0)
goto out_common_hdmi;
+
+ ret = exynos_platform_device_hdmi_register();
+ if (ret < 0)
+ goto out_common_hdmi_dev;
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
goto out_g2d;
#endif
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ ret = platform_driver_register(&fimc_driver);
+ if (ret < 0)
+ goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ ret = platform_driver_register(&rotator_driver);
+ if (ret < 0)
+ goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ ret = platform_driver_register(&gsc_driver);
+ if (ret < 0)
+ goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = platform_driver_register(&ipp_driver);
+ if (ret < 0)
+ goto out_ipp;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
+ goto out_drm;
+
+ exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+ NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+ ret = PTR_ERR(exynos_drm_pdev);
goto out;
+ }
return 0;
out:
+ platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
out_g2d:
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
platform_driver_unregister(&vidi_driver);
+out_vidi:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
out_common_hdmi:
platform_driver_unregister(&mixer_driver);
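
The init path now follows the usual unwind-ladder convention: registrations happen in order, and each failure label undoes exactly the steps already completed, in reverse. A stripped-down sketch of the pattern; foo_driver and bar_driver are hypothetical:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static struct platform_driver foo_driver;	/* hypothetical */
	static struct platform_driver bar_driver;	/* hypothetical */

	static int __init sketch_init(void)
	{
		int ret;

		ret = platform_driver_register(&foo_driver);
		if (ret < 0)
			return ret;

		ret = platform_driver_register(&bar_driver);
		if (ret < 0)
			goto err_unregister_foo;

		return 0;

	err_unregister_foo:
		/* undo only what succeeded, newest first */
		platform_driver_unregister(&foo_driver);
		return ret;
	}
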
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ platform_device_unregister(exynos_drm_pdev);
+
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a3423103649..f5a97745bf9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
* @commit: apply hardware specific overlay data to registers.
* @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- * hardware overlay is disabled.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
void (*commit)(struct device *subdrv_dev, int zpos);
void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
- void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
* @pixel_format: fourcc pixel format of this overlay
 * @dma_addr: array of bus (accessed by dma) addresses to the memory region
 *	allocated for an overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
* @zpos: order of overlay layer(z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- void __iomem *vaddr[MAX_FB_BUFFER];
int zpos;
bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ * hardware overlay is updated.
*/
struct exynos_drm_manager_ops {
void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
+ void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
- struct list_head gem_list;
- unsigned int gem_nr;
+ struct list_head userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+ struct device *dev;
+ struct list_head event_list;
};
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
+ struct exynos_drm_ipp_private *ipp_priv;
};
/*
* Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *	with iommu, the device address space starts from this address;
+ *	otherwise the default one is used.
+ * @da_space_size: size of the device address space.
+ *	if 0, the default value is used.
+ * @da_space_order: order of the device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
+
+ unsigned long da_start;
+ unsigned long da_space_size;
+ unsigned long da_space_order;
};
/*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+/*
+ * this function registers the exynos drm hdmi platform device. It ensures
+ * that only one instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters the exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index f2df06c603f..301485215a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -234,6 +234,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_encoder *exynos_encoder;
+ struct exynos_drm_manager_ops *ops;
+ struct drm_device *dev = fb->dev;
+ struct drm_encoder *encoder;
+
+ /*
+ * make sure that overlay data are updated to real hardware
+ * for all encoders.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ exynos_encoder = to_exynos_encoder(encoder);
+ ops = exynos_encoder->manager->ops;
+
+ /*
+ * wait for vblank interrupt
+ * - this makes sure that overlay data are updated to
+ * real hardware.
+ */
+ if (ops->wait_for_vblank)
+ ops->wait_for_vblank(exynos_encoder->manager->dev);
+ }
+}
+
+
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
@@ -505,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
-
- /*
- * wait for vblank interrupt
- * - this makes sure that hardware overlay is disabled to avoid
- * for the dma accesses to memory after gem buffer was released
- * because the setting for disabling the overlay will be updated
- * at vsync.
- */
- if (overlay_ops && overlay_ops->wait_for_vblank)
- overlay_ops->wait_for_vblank(manager->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a..88bb25a2a91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f993..5426cc5a5e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ unsigned int flags;
+
+ /*
+ * if exynos drm driver supports iommu then framebuffer can use
+ * all the buffer types.
+ */
+ if (is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ flags = exynos_gem_obj->flags;
+
+ /*
+	 * without iommu support, physically non-continuous memory is
+	 * not supported for a framebuffer.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
DRM_DEBUG_KMS("%s\n", __FILE__);
+	/* make sure that overlay data are updated before releasing fb. */
+ exynos_drm_encoder_complete_scanout(fb);
+
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
return &exynos_fb->fb;
}
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
struct exynos_drm_fb *exynos_fb;
- int i;
+ int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb)) {
- drm_gem_object_unreference_unlocked(obj);
- return fb;
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb) {
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ return ERR_PTR(-ENOMEM);
}
- exynos_fb = to_exynos_fb(fb);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- exynos_drm_fb_destroy(fb);
+ kfree(exynos_fb);
return ERR_PTR(-ENOENT);
}
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
- return fb;
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ if (ret) {
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *gem_obj;
+
+ gem_obj = exynos_fb->exynos_gem_obj[i];
+ drm_gem_object_unreference_unlocked(&gem_obj->base);
+ }
+
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
+ return &exynos_fb->fb;
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
- (unsigned long)buffer->kvaddr,
- (unsigned long)buffer->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
return buffer;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7466c4414c..f433eb7533a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
struct exynos_drm_gem_obj *exynos_gem_obj;
};
+static int exynos_drm_fb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ unsigned long vm_size;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > buffer->size)
+ return -EINVAL;
+
+ ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ .fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
+	/* map pages into kernel virtual address space. */
+ if (!buffer->kvaddr) {
+ unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+ buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!buffer->kvaddr) {
+ DRM_ERROR("failed to map pages to kernel space.\n");
+ return -EIO;
+ }
+ }
+
 	/* buffer count for framebuffer is always 1 at boot time. */
exynos_drm_fb_set_buf_cnt(fb, 1);
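
Because the backing storage is now allocated with DMA_ATTR_NO_KERNEL_MAPPING, fbdev is the one place that still needs a CPU view, so it builds one on demand. The vmap()/vunmap() pairing, with pgprot_writecombine() matching the write-combined DMA allocation, reduces to the following sketch; pages and nr_pages are assumed to come from the allocator:

	#include <linux/vmalloc.h>

	/* sketch: on-demand kernel mapping for a NO_KERNEL_MAPPING
	 * buffer; must be torn down with vunmap() at destroy time. */
	void *kvaddr = vmap(pages, nr_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	if (!kvaddr)
		return -EIO;
	/* ... access the buffer through kvaddr ... */
	vunmap(kvaddr);
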
@@ -87,8 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
- offset);
+ fbi->fix.smem_start = (unsigned long)
+ (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
- goto out;
+ goto err_release_framebuffer;
}
exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
- goto out;
+ goto err_destroy_gem;
}
helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret) {
DRM_ERROR("failed to allocate cmap.\n");
- goto out;
+ goto err_destroy_framebuffer;
}
ret = exynos_drm_fbdev_update(helper, helper->fb);
- if (ret < 0) {
- fb_dealloc_cmap(&fbi->cmap);
- goto out;
- }
+ if (ret < 0)
+ goto err_dealloc_cmap;
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+ drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+ exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+ framebuffer_release(fbi);
/*
* if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
+ if (exynos_gem_obj->buffer->kvaddr)
+ vunmap(exynos_gem_obj->buffer->kvaddr);
+
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 00000000000..61ea24296b5
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_SRC 2
+#define FIMC_MAX_DST 32
+#define FIMC_SHFACTOR 10
+#define FIMC_BUF_STOP 1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ 32
+#define FIMC_WIDTH_ITU_709 1280
+#define FIMC_REFRESH_MAX 60
+#define FIMC_REFRESH_MIN 12
+#define FIMC_CROP_MAX 8192
+#define FIMC_CROP_MIN 32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct fimc_context, ippdrv);
+#define fimc_read(offset) readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
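+
The fimc_read()/fimc_write() macros expand to readl()/writel() against ctx->regs, so they require a fimc_context named ctx in scope. Virtually every helper below uses them in the same read-modify-write shape; a representative sketch (sketch_set_bits() is hypothetical):

	static void sketch_set_bits(struct fimc_context *ctx, u32 reg, u32 bits)
	{
		u32 cfg;

		cfg = fimc_read(reg);	/* readl(ctx->regs + reg) */
		cfg |= bits;
		fimc_write(cfg, reg);	/* writel(cfg, ctx->regs + reg) */
	}
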
+enum fimc_wb {
+ FIMC_WB_NONE,
+ FIMC_WB_A,
+ FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: bypass the scaler path.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+ bool range;
+ bool bypass;
+ bool up_h;
+ bool up_v;
+ u32 hratio;
+ u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height with output rotation.
+ */
+struct fimc_capability {
+ /* scaler */
+ u32 in_hori;
+ u32 bypass;
+ /* output rotator */
+ u32 dst_h_wo_rot;
+ u32 dst_h_rot;
+ /* input rotator */
+ u32 rl_w_wo_rot;
+ u32 rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+ char *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @odr: ordering of YUV.
+ * @ver: fimc version.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: suspended state.
+ */
+struct fimc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *sclk_fimc_clk;
+ struct clk *fimc_clk;
+ struct clk *wb_clk;
+ struct clk *wb_b_clk;
+ struct fimc_scaler sc;
+ struct fimc_driverdata *ddata;
+ struct exynos_drm_ipp_pol pol;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+ cfg = fimc_read(EXYNOS_CISRCFMT);
+ cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+ if (pattern)
+ cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* s/w reset */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= (EXYNOS_CIGCTRL_SWRST);
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* s/w reset complete */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_SWRST;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+ u32 camblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ camblk_cfg = readl(SYSREG_CAMERA_BLK);
+ camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+ camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+ writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+ EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+ EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+ switch (wb) {
+ case FIMC_WB_A:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_B:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_NONE:
+ default:
+ cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+ EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+ break;
+ }
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+ struct exynos_drm_ipp_pol *pol)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+ __func__, pol->inv_pclk, pol->inv_vsync);
+ DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+ __func__, pol->inv_href, pol->inv_hsync);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+ EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+ if (pol->inv_pclk)
+ cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+ if (pol->inv_vsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+ if (pol->inv_href)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+ if (pol->inv_hsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+ else
+ cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+ bool overflow, bool level)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, level);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable) {
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+ if (overflow)
+ cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+ if (level)
+ cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+ } else
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, status, flag;
+
+ status = fimc_read(EXYNOS_CISTATUS);
+ flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+ EXYNOS_CISTATUS_OVFICR;
+
+ DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+ if (status & flag) {
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ ctx->id, status);
+ return true;
+ }
+
+ return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ cfg = fimc_read(EXYNOS_CISTATUS);
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+ if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+ return false;
+
+ cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+ fimc_write(cfg, EXYNOS_CISTATUS);
+
+ return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+ u32 cfg;
+ int frame_cnt, buf_id;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CISTATUS2);
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+ if (frame_cnt == 0)
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+ DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+ if (frame_cnt == 0) {
+ DRM_ERROR("failed to get frame count.\n");
+ return -EIO;
+ }
+
+ buf_id = frame_cnt - 1;
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+ else
+ cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+ EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12MT:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg1, cfg2;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg1 = fimc_read(EXYNOS_MSCTRL);
+ cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+ cfg2 = fimc_read(EXYNOS_CITRGFMT);
+ cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg1, EXYNOS_MSCTRL);
+ fimc_write(cfg2, EXYNOS_CITRGFMT);
+ *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ u32 cfg, h1, h2, v1, v2;
+
+ /* cropped image */
+ h1 = pos->x;
+ h2 = sz->hsize - pos->w - pos->x;
+ v1 = pos->y;
+ v2 = sz->vsize - pos->h - pos->y;
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+ DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+ h1, h2, v1, v2);
+
+ /*
+ * set window offset 1, 2 size
+ * check figure 43-21 in user manual
+ */
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+ EXYNOS_CIWDOFST_WINVEROFST_MASK);
+ cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+ EXYNOS_CIWDOFST_WINVEROFST(v1));
+ cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+ EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+ fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+ return 0;
+}
+
+static int fimc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGISIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+ pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* set input DMA image size */
+ cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+ cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+ EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+ cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+ EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+ fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+ /*
+ * set input FIFO image size
+ * for now, we support only ITU601 8 bit mode
+ */
+ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+ EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIIYOFF);
+ cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICBOFF);
+ cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICROFF);
+
+ return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_SRC) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_SRC];
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIIYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+ .set_fmt = fimc_src_set_fmt,
+ .set_transf = fimc_src_set_transf,
+ .set_size = fimc_src_set_size,
+ .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+ EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+ EXYNOS_CIOCTRL_ORDER422_MASK |
+ EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+
+ return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_CIEXTEN);
+
+ if (fmt == DRM_FORMAT_AYUV) {
+ cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+ } else {
+ cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV21:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
+ fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ }
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+ cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+ EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 64) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 32) {
+ *ratio = 32;
+ *shift = 5;
+ } else if (src >= dst * 16) {
+ *ratio = 16;
+ *shift = 4;
+ } else if (src >= dst * 8) {
+ *ratio = 8;
+ *shift = 3;
+ } else if (src >= dst * 4) {
+ *ratio = 4;
+ *shift = 2;
+ } else if (src >= dst * 2) {
+ *ratio = 2;
+ *shift = 1;
+ } else {
+ *ratio = 1;
+ *shift = 0;
+ }
+
+ return 0;
+}
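
In other words, the pre-scaler ratio is the largest power of two, capped at 32, by which src still covers dst; anything at or beyond 64:1 is rejected. A worked example (sizes hypothetical):

	u32 ratio, shift;
	int ret;

	/* 1920 -> 240 is an 8:1 downscale: 1920 >= 240 * 8 but
	 * 1920 < 240 * 16, so ratio = 8 and shift = 3. */
	ret = fimc_get_ratio_shift(1920, 240, &ratio, &shift);
	/* ret == 0, ratio == 8, shift == 3 */
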
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, cfg_ext, shfactor;
+ u32 pre_dst_width, pre_dst_height;
+ u32 pre_hratio, hfactor, pre_vratio, vfactor;
+ int ret = 0;
+ u32 src_w, src_h, dst_w, dst_h;
+
+ cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+ if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+ src_w = src->h;
+ src_h = src->w;
+ } else {
+ src_w = src->w;
+ src_h = src->h;
+ }
+
+ if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ pre_dst_width = src_w / pre_hratio;
+ pre_dst_height = src_h / pre_vratio;
+ DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+ pre_dst_width, pre_dst_height);
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+ __func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
+ sc->hratio = (src_w << 14) / (dst_w << hfactor);
+ sc->vratio = (src_h << 14) / (dst_h << vfactor);
+ sc->up_h = (dst_w >= src_w) ? true : false;
+ sc->up_v = (dst_h >= src_h) ? true : false;
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+ shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+ DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+ cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+ EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+ EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+ fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+ cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+ EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+ fimc_write(cfg, EXYNOS_CISCPREDST);
+
+ return ret;
+}
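
The main-scaler ratios are 2.14 fixed point computed on the pre-scaled size. Continuing the hypothetical 1920x1080 -> 240x135 example:

	/*
	 * pre-scale: pre_hratio = 8 (hfactor = 3), pre_vratio = 8
	 * (vfactor = 3), so the pre-scaled image is 240x135.
	 * main scaler, 2.14 fixed point:
	 *   hratio = (1920 << 14) / (240 << 3) = 16384, i.e. 1.0
	 *   vratio = (1080 << 14) / (135 << 3) = 16384, i.e. 1.0
	 * shfactor = FIMC_SHFACTOR - (3 + 3) = 4.
	 * the whole 8:1 reduction is done by the pre-scaler, leaving
	 * the main scaler at unity.
	 */
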
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+ u32 cfg, cfg_ext;
+
+ DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+ __func__, sc->hratio, sc->vratio);
+
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+ EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+ EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+ EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+ EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+ if (sc->range)
+ cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+ if (sc->bypass)
+ cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+ if (sc->up_h)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+ if (sc->up_v)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+ cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+ EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+ cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+ EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+ fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
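
The >> 6 above puts the coarse part of each Q14 ratio into CISCCTRL, and the masks suggest the low six bits land in the CIEXTEN extension fields for extra precision. A sketch of the split and recombination, assuming the extension fields are exactly 6 bits wide:

static inline u32 fimc_split_ratio(u32 ratio, u32 *coarse, u32 *ext)
{
	*coarse = ratio >> 6;		/* MAINHORRATIO / MAINVERRATIO */
	*ext = ratio & 0x3f;		/* *_RATIO_EXT in EXYNOS_CIEXTEN */
	return (*coarse << 6) | *ext;	/* recombines to the original ratio */
}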
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h);
+
+ /* CSC ITU */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+ if (sz->hsize >= FIMC_WIDTH_ITU_709)
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+ else
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* target image size */
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+ EXYNOS_CITRGFMT_TARGETV_MASK);
+ cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+ EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+
+ /* target area */
+ cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+ fimc_write(cfg, EXYNOS_CITAREA);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOYOFF);
+ cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCBOFF);
+ cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCROFF);
+
+ return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+ u32 cfg, i, buf_num = 0;
+ u32 mask = 0x00000001;
+
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ for (i = 0; i < FIMC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num++;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
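
The loop is a population count over the CIFCNTSEQ bits; the kernel's hweight helper expresses the same thing directly. A minimal sketch, assuming FIMC_REG_SZ spans the whole 32-bit register:

#include <linux/bitops.h>

static int fimc_dst_buf_count(u32 cfg)
{
	/* number of enqueued destination buffers in CIFCNTSEQ */
	return hweight32(cfg);
}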
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool enable;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ enable = true;
+ break;
+ case IPP_BUF_DEQUEUE:
+ enable = false;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= (~mask);
+ cfg |= (enable << buf_id);
+ fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+ fimc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+ fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int fimc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_DST) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_DST];
+
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIOYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+ .set_fmt = fimc_dst_set_fmt,
+ .set_transf = fimc_dst_set_transf,
+ .set_size = fimc_dst_set_size,
+ .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->sclk_fimc_clk);
+ clk_enable(ctx->fimc_clk);
+ clk_enable(ctx->wb_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_disable(ctx->fimc_clk);
+ clk_disable(ctx->wb_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
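
Note the bare clk_enable()/clk_disable() pairs. On kernels where these clocks are provided by the common clock framework, each enable must be paired with a prepare; a sketch of the equivalent gating in that environment (an assumption about the target kernel, not something this patch requires):

	if (enable) {
		clk_prepare_enable(ctx->sclk_fimc_clk);
		clk_prepare_enable(ctx->fimc_clk);
		clk_prepare_enable(ctx->wb_clk);
	} else {
		clk_disable_unprepare(ctx->wb_clk);
		clk_disable_unprepare(ctx->fimc_clk);
		clk_disable_unprepare(ctx->sclk_fimc_clk);
	}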
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+ struct fimc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ int buf_id;
+
+ DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+ fimc_clear_irq(ctx);
+ if (fimc_check_ovf(ctx))
+ return IRQ_NONE;
+
+ if (!fimc_check_frame_end(ctx))
+ return IRQ_NONE;
+
+ buf_id = fimc_get_buf_id(ctx);
+ if (buf_id < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return IRQ_HANDLED;
+ }
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+ queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+ return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = FIMC_REFRESH_MIN;
+ prop_list->refresh_max = FIMC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+ (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = FIMC_CROP_MAX;
+ prop_list->crop_max.vsize = FIMC_CROP_MAX;
+ prop_list->crop_min.hsize = FIMC_CROP_MIN;
+ prop_list->crop_min.vsize = FIMC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+ prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+ prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+ prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
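
The flip and degree fields advertise capabilities as bitmasks keyed by the enum values, so a caller can validate a request with a single AND. A hypothetical caller-side test (sketch; the helper name is illustrative):

static bool fimc_supports_degree(struct drm_exynos_ipp_prop_list *p,
				 enum drm_exynos_degree degree)
{
	return p->degree & (1 << degree);
}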
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!fimc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:\n", __func__);
+
+ for (i = 0; i < FIMC_MAX_SRC; i++) {
+ fimc_write(0, EXYNOS_CIIYSA(i));
+ fimc_write(0, EXYNOS_CIICBSA(i));
+ fimc_write(0, EXYNOS_CIICRSA(i));
+ }
+
+ for (i = 0; i < FIMC_MAX_DST; i++) {
+ fimc_write(0, EXYNOS_CIOYSA(i));
+ fimc_write(0, EXYNOS_CIOCBSA(i));
+ fimc_write(0, EXYNOS_CIOCRSA(i));
+ }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ fimc_sw_reset(ctx, false);
+
+ /* reset scaler capability */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+ fimc_clear_addr(ctx);
+
+ return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ int ret, i;
+ u32 cfg0, cfg1;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ fimc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ ret = fimc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set precalser.\n");
+ return ret;
+ }
+
+ /* if enabled, the frame could be captured as JPEG; keep it disabled */
+ fimc_handle_jpeg(ctx, false);
+ fimc_set_scaler(ctx, &ctx->sc);
+ fimc_set_polarity(ctx, &ctx->pol);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+ fimc_handle_lastend(ctx, false);
+
+ /* setup dma */
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ fimc_set_type_ctrl(ctx, FIMC_WB_A);
+ fimc_handle_lastend(ctx, true);
+
+ /* setup FIMD */
+ fimc_set_camblk_fimd0_wb(ctx);
+
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ /* Reset status */
+ fimc_write(0x0, EXYNOS_CISTATUS);
+
+ cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+ cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+ /* Scaler */
+ cfg1 = fimc_read(EXYNOS_CISCCTRL);
+ cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+ cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+ EXYNOS_CISCCTRL_SCALERSTART);
+
+ fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+ /* Enable image capture*/
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+ fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+ /* Enable frame end irq (clear the disable bit) */
+ cfg0 = fimc_read(EXYNOS_CIGCTRL);
+ cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+ cfg0 = fimc_read(EXYNOS_CIOCTRL);
+ cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+ fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+ if (cmd == IPP_CMD_M2M) {
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ }
+
+ return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* Source clear */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ fimc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+ /* Scaler disable */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ /* Disable image capture */
+ cfg = fimc_read(EXYNOS_CIIMGCPT);
+ cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+ fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+ /* Disable frame end irq (set the disable bit) */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx;
+ struct clk *parent_clk;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct exynos_drm_fimc_pdata *pdata;
+ struct fimc_driverdata *ddata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data specified.\n");
+ return -EINVAL;
+ }
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ddata = (struct fimc_driverdata *)
+ platform_get_device_id(pdev)->driver_data;
+
+ /* clock control */
+ ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+ if (IS_ERR(ctx->sclk_fimc_clk)) {
+ dev_err(dev, "failed to get src fimc clock.\n");
+ ret = PTR_ERR(ctx->sclk_fimc_clk);
+ goto err_ctx;
+ }
+ clk_enable(ctx->sclk_fimc_clk);
+
+ ctx->fimc_clk = clk_get(dev, "fimc");
+ if (IS_ERR(ctx->fimc_clk)) {
+ dev_err(dev, "failed to get fimc clock.\n");
+ ret = PTR_ERR(ctx->fimc_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ goto err_ctx;
+ }
+
+ ctx->wb_clk = clk_get(dev, "pxl_async0");
+ if (IS_ERR(ctx->wb_clk)) {
+ dev_err(dev, "failed to get writeback a clock.\n");
+ ret = PTR_ERR(ctx->wb_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ goto err_ctx;
+ }
+
+ ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+ if (IS_ERR(ctx->wb_b_clk)) {
+ dev_err(dev, "failed to get writeback b clock.\n");
+ ret = PTR_ERR(ctx->wb_b_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ goto err_ctx;
+ }
+
+ parent_clk = clk_get(dev, ddata->parent_clk);
+
+ if (IS_ERR(parent_clk)) {
+ dev_err(dev, "failed to get parent clock.\n");
+ ret = PTR_ERR(parent_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+ goto err_ctx;
+ }
+
+ if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+ dev_err(dev, "failed to set parent.\n");
+ ret = -EINVAL;
+ clk_put(parent_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+ goto err_ctx;
+ }
+
+ clk_put(parent_clk);
+ clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ctx->regs_res) {
+ dev_err(dev, "failed to find registers.\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ ret = -ENOENT;
+ goto err_get_regs;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+ IRQF_ONESHOT, "drm_fimc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ goto err_get_regs;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+ ctx->pol = pdata->pol;
+ ctx->ddata = ddata;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+ ippdrv->check_property = fimc_ippdrv_check_property;
+ ippdrv->reset = fimc_ippdrv_reset;
+ ippdrv->start = fimc_ippdrv_start;
+ ippdrv->stop = fimc_ippdrv_stop;
+ ret = fimc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm fimc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+err_get_regs:
+ devm_iounmap(dev, ctx->regs);
+err_clk:
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+err_ctx:
+ devm_kfree(dev, ctx);
+ return ret;
+}
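
Each error path above unwinds the clock lookups by hand. The FIMD changes later in this pull switch to devm_clk_get() (see exynos_drm_fimd.c below), and the same managed pattern would collapse these paths into early returns; a sketch of the idea, not part of the patch:

	ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
	if (IS_ERR(ctx->sclk_fimc_clk))
		return PTR_ERR(ctx->sclk_fimc_clk);

	ctx->fimc_clk = devm_clk_get(dev, "fimc");
	if (IS_ERR(ctx->fimc_clk))
		return PTR_ERR(ctx->fimc_clk);

	/* likewise for "pxl_async0" / "pxl_async1"; the devres core then
	 * releases every clock automatically on probe failure or remove. */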
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+ devm_iounmap(dev, ctx->regs);
+
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return fimc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+ .parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4412_fimc_data = {
+ .parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+ {
+ .name = "exynos4210-fimc",
+ .driver_data = (unsigned long)&exynos4210_fimc_data,
+ }, {
+ .name = "exynos4412-fimc",
+ .driver_data = (unsigned long)&exynos4412_fimc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
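
For reference, with both CONFIG_PM_SLEEP and CONFIG_PM_RUNTIME enabled the two macros expand roughly to the following member assignments (a sketch of the net effect, not the literal preprocessor output):

static const struct dev_pm_ops fimc_pm_ops = {
	.suspend = fimc_suspend,	.resume = fimc_resume,
	.freeze = fimc_suspend,		.thaw = fimc_resume,
	.poweroff = fimc_suspend,	.restore = fimc_resume,
	.runtime_suspend = fimc_runtime_suspend,
	.runtime_resume = fimc_runtime_resume,
	.runtime_idle = NULL,
};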
+
+struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = __devexit_p(fimc_remove),
+ .id_table = fimc_driver_ids,
+ .driver = {
+ .name = "exynos-drm-fimc",
+ .owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 00000000000..dc970fa0d88
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e08478f19f1..bf0d9baca2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
+ bool resume;
};
struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
u32 vidcon1;
bool suspended;
struct mutex lock;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
struct exynos_drm_panel_info *panel;
};
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+ { .compatible = "samsung,exynos4-fimd",
+ .data = &exynos4_fimd_driver_data },
+ { .compatible = "samsung,exynos5-fimd",
+ .data = &exynos5_fimd_driver_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
+#ifdef CONFIG_OF
+ const struct of_device_id *of_id =
+ of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+ if (of_id)
+ return (struct fimd_driver_data *)of_id->data;
+#endif
+
return (struct fimd_driver_data *)
platform_get_device_id(pdev)->driver_data;
}
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(timing->yres - 1) |
- VIDTCON2_HOZVAL(timing->xres - 1);
+ VIDTCON2_HOZVAL(timing->xres - 1) |
+ VIDTCON2_LINEVAL_E(timing->yres - 1) |
+ VIDTCON2_HOZVAL_E(timing->xres - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
/* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
}
}
+static void fimd_wait_for_vblank(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ if (ctx->suspended)
+ return;
+
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for FIMD to signal the VSYNC interrupt, or time out
+ * after 50ms (one frame period at a 20Hz refresh rate).
+ */
+ if (!wait_event_timeout(ctx->wait_vsync_queue,
+ !atomic_read(&ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
static struct exynos_drm_manager_ops fimd_manager_ops = {
.dpms = fimd_dpms,
.apply = fimd_apply,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
+ .wait_for_vblank = fimd_wait_for_vblank,
};
static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
+ unsigned int last_x;
+ unsigned int last_y;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
/* buffer size */
val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+ VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+ VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+ VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+ VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
writel(val, ctx->regs + VIDOSD_A(win));
- val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
- win_data->ovl_width - 1) |
- VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
- win_data->ovl_height - 1);
+ last_x = win_data->offset_x + win_data->ovl_width;
+ if (last_x)
+ last_x--;
+ last_y = win_data->offset_y + win_data->ovl_height;
+ if (last_y)
+ last_y--;
+
+ val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+ VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y,
- win_data->offset_x + win_data->ovl_width - 1,
- win_data->offset_y + win_data->ovl_height - 1);
+ win_data->offset_x, win_data->offset_y, last_x, last_y);
/* hardware window 0 doesn't support alpha channel. */
if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data = &ctx->win_data[win];
+ if (ctx->suspended) {
+ /* do not resume this window */
+ win_data->resume = false;
+ return;
+ }
+
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data->enabled = false;
}
-static void fimd_wait_for_vblank(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- int ret;
-
- ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
- VIDCON1_VSTATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
.mode_set = fimd_win_mode_set,
.commit = fimd_win_commit,
.disable = fimd_win_disable,
- .wait_for_vblank = fimd_wait_for_vblank,
};
static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
-
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
+ drm_vblank_put(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
out:
return IRQ_HANDLED;
}
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->vblank_disable_allowed = 1;
+ /* attach this sub driver to iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_attach_device(drm_dev, dev);
+
return 0;
}
@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* TODO. */
+ /* detach this sub driver from iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, dev);
}
static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
return 0;
}
+static void fimd_window_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ fimd_win_disable(dev, i);
+ }
+ fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
static int fimd_activate(struct fimd_context *ctx, bool enable)
{
+ struct device *dev = ctx->subdrv.dev;
if (enable) {
int ret;
- struct device *dev = ctx->subdrv.dev;
ret = fimd_clock(ctx, true);
if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
fimd_enable_vblank(dev);
+
+ fimd_window_resume(dev);
} else {
+ fimd_window_suspend(dev);
+
fimd_clock(ctx, false);
ctx->suspended = true;
}
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->bus_clk = clk_get(dev, "fimd");
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
- ret = PTR_ERR(ctx->bus_clk);
- goto err_clk_get;
+ return PTR_ERR(ctx->bus_clk);
}
- ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
- ret = PTR_ERR(ctx->lcd_clk);
- goto err_bus_clk;
+ return PTR_ERR(ctx->lcd_clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!ctx->regs) {
dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
- goto err_clk;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return -ENXIO;
}
ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return ret;
}
ctx->vidcon0 = pdata->vidcon0;
ctx->vidcon1 = pdata->vidcon1;
ctx->default_win = pdata->default_win;
ctx->panel = panel;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
subdrv = &ctx->subdrv;
@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
exynos_drm_subdrv_register(subdrv);
return 0;
-
-err_clk:
- clk_disable(ctx->lcd_clk);
- clk_put(ctx->lcd_clk);
-
-err_bus_clk:
- clk_disable(ctx->bus_clk);
- clk_put(ctx->bus_clk);
-
-err_clk_get:
- return ret;
}
static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
out:
pm_runtime_disable(dev);
- clk_put(ctx->lcd_clk);
- clk_put(ctx->bus_clk);
-
return 0;
}
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
.pm = &fimd_pm_ops,
+ .of_match_table = of_match_ptr(fimd_driver_dt_match),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46..6ffa0763c07 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
+#define MAX_BUF_ADDR_NR 6
+
+/* the userptr buffer pool is limited to 64MB by default */
+#define MAX_POOL (64 * 1024 * 1024)
+
+enum {
+ BUF_TYPE_GEM = 1,
+ BUF_TYPE_USERPTR,
+};
+
/* cmdlist data structure */
struct g2d_cmdlist {
- u32 head;
- u32 data[G2D_CMDLIST_DATA_NUM];
- u32 last; /* last data offset */
+ u32 head;
+ unsigned long data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
};
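
Widening data[] from u32 to unsigned long leaves the layout unchanged on 32-bit ARM, but G2D_CMDLIST_DATA_NUM above is still derived from sizeof(u32), so an LP64 build would overrun the per-node slot carved out of the DMA pool. A compile-time guard for that assumption (sketch, e.g. placed in g2d_init_cmdlist()):

	BUILD_BUG_ON(sizeof(struct g2d_cmdlist) > G2D_CMDLIST_SIZE);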
struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
struct drm_exynos_g2d_event event;
};
-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
struct list_head list;
- unsigned int handle;
+ dma_addr_t dma_addr;
+ unsigned long userptr;
+ unsigned long size;
+ struct page **pages;
+ unsigned int npages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ atomic_t refcount;
+ bool in_pool;
+ bool out_of_list;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
- unsigned int gem_nr;
+ unsigned int map_nr;
+ unsigned long handles[MAX_BUF_ADDR_NR];
+ unsigned int obj_type[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
+ struct drm_file *filp;
pid_t pid;
struct completion complete;
int async;
@@ -143,23 +168,33 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
+ struct dma_attrs cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
+
+ unsigned long current_pool;
+ unsigned long max_pool;
};
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
- g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
- &g2d->cmdlist_pool, GFP_KERNEL);
+ init_dma_attrs(&g2d->cmdlist_dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+ G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL,
+ &g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct device *dev = g2d->dev;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
kfree(g2d->cmdlist_node);
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+ unsigned long obj,
+ bool force)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr =
+ (struct g2d_cmdlist_userptr *)obj;
+
+ if (!obj)
+ return;
+
+ if (force)
+ goto out;
+
+ atomic_dec(&g2d_userptr->refcount);
+
+ if (atomic_read(&g2d_userptr->refcount) > 0)
+ return;
+
+ if (g2d_userptr->in_pool)
+ return;
+
+out:
+ exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+ if (!g2d_userptr->out_of_list)
+ list_del_init(&g2d_userptr->list);
+
+ sg_free_table(g2d_userptr->sgt);
+ kfree(g2d_userptr->sgt);
+ g2d_userptr->sgt = NULL;
+
+ kfree(g2d_userptr->pages);
+ g2d_userptr->pages = NULL;
+ kfree(g2d_userptr);
+ g2d_userptr = NULL;
+}
+
+dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+ unsigned long userptr,
+ unsigned long size,
+ struct drm_file *filp,
+ unsigned long *obj)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr;
+ struct g2d_data *g2d;
+ struct page **pages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ unsigned int npages, offset;
+ int ret;
+
+ if (!size) {
+ DRM_ERROR("invalid userptr size.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ g2d = dev_get_drvdata(g2d_priv->dev);
+
+ /* check if userptr already exists in userptr_list. */
+ list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ if (g2d_userptr->userptr == userptr) {
+ /*
+ * also check size because there could be same address
+ * and different size.
+ */
+ if (g2d_userptr->size == size) {
+ atomic_inc(&g2d_userptr->refcount);
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+ }
+
+ /*
+ * the g2d dma engine may still be accessing this
+ * g2d_userptr memory region, so remove the object from
+ * userptr_list so it cannot be looked up again, and take
+ * it out of the userptr pool so that it is released once
+ * the dma access completes.
+ */
+ g2d_userptr->out_of_list = true;
+ g2d_userptr->in_pool = false;
+ list_del_init(&g2d_userptr->list);
+
+ break;
+ }
+ }
+
+ g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+ if (!g2d_userptr) {
+ DRM_ERROR("failed to allocate g2d_userptr.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&g2d_userptr->refcount, 1);
+
+ start = userptr & PAGE_MASK;
+ offset = userptr & ~PAGE_MASK;
+ end = PAGE_ALIGN(userptr + size);
+ npages = (end - start) >> PAGE_SHIFT;
+ g2d_userptr->npages = npages;
+
+ pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ kfree(g2d_userptr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vma = find_vma(current->mm, userptr);
+ if (!vma) {
+ DRM_ERROR("failed to get vm region.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ if (vma->vm_end < userptr + size) {
+ DRM_ERROR("vma is too small.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->vma = exynos_gem_get_vma(vma);
+ if (!g2d_userptr->vma) {
+ DRM_ERROR("failed to copy vma.\n");
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->size = size;
+
+ ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+ npages, pages, vma);
+ if (ret < 0) {
+ DRM_ERROR("failed to get user pages from userptr.\n");
+ goto err_put_vma;
+ }
+
+ g2d_userptr->pages = pages;
+
+ sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+ if (!sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_userptr;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+ size, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to get sgt from pages.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->sgt = sgt;
+
+ ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+ if (ret < 0) {
+ DRM_ERROR("failed to map sgt with dma region.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+ g2d_userptr->userptr = userptr;
+
+ list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+ if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+ g2d->current_pool += npages << PAGE_SHIFT;
+ g2d_userptr->in_pool = true;
+ }
+
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = NULL;
+
+err_free_userptr:
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+err_put_vma:
+ exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+ kfree(pages);
+ kfree(g2d_userptr);
+ pages = NULL;
+ g2d_userptr = NULL;
+
+ return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+ struct g2d_data *g2d,
+ struct drm_file *filp)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+ list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ if (g2d_userptr->in_pool)
+ g2d_userptr_put_dma_addr(drm_dev,
+ (unsigned long)g2d_userptr,
+ true);
+
+ g2d->current_pool = 0;
+}
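
The in_pool flag set in g2d_userptr_get_dma_addr() keeps up to max_pool (MAX_POOL, 64MB by default) of pinned userptr mappings cached across submissions, and g2d_userptr_free_all() drains that cache when the file is closed. The admission rule, factored into a hypothetical helper for illustration:

static bool g2d_pool_admit(struct g2d_data *g2d, unsigned int npages)
{
	unsigned long bytes = (unsigned long)npages << PAGE_SHIFT;

	if (g2d->current_pool + bytes >= g2d->max_pool)
		return false;	/* unmapped as soon as its dma use ends */

	g2d->current_pool += bytes;
	return true;		/* cached until g2d_userptr_free_all() */
}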
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_device *drm_dev,
+ struct drm_file *file)
+{
struct g2d_cmdlist *cmdlist = node->cmdlist;
- dma_addr_t *addr;
int offset;
int i;
- for (i = 0; i < node->gem_nr; i++) {
- struct g2d_gem_node *gem_node;
-
- gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
- if (!gem_node) {
- dev_err(g2d_priv->dev, "failed to allocate gem node\n");
- return -ENOMEM;
- }
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle;
+ dma_addr_t *addr;
offset = cmdlist->last - (i * 2 + 1);
- gem_node->handle = cmdlist->data[offset];
-
- addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
- file);
- if (IS_ERR(addr)) {
- node->gem_nr = i;
- kfree(gem_node);
- return PTR_ERR(addr);
+ handle = cmdlist->data[offset];
+
+ if (node->obj_type[i] == BUF_TYPE_GEM) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+ } else {
+ struct drm_exynos_g2d_userptr g2d_userptr;
+
+ if (copy_from_user(&g2d_userptr, (void __user *)handle,
+ sizeof(struct drm_exynos_g2d_userptr))) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+
+ addr = g2d_userptr_get_dma_addr(drm_dev,
+ g2d_userptr.userptr,
+ g2d_userptr.size,
+ file,
+ &handle);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
}
cmdlist->data[offset] = *addr;
- list_add_tail(&gem_node->list, &g2d_priv->gem_list);
- g2d_priv->gem_nr++;
+ node->handles[i] = handle;
}
return 0;
}
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_file *filp)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct g2d_gem_node *node, *n;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ int i;
- list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
- if (!nr)
- break;
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle = node->handles[i];
- exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
- list_del_init(&node->list);
- kfree(node);
- nr--;
+ if (node->obj_type[i] == BUF_TYPE_GEM)
+ exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+ filp);
+ else
+ g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+ false);
+
+ node->handles[i] = 0;
}
+
+ node->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
+ struct g2d_cmdlist_node *node;
+
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
+ /*
+ * the commands in run_cmdlist have completed, so unmap the gem
+ * objects in each command node to drop their references.
+ */
+ list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+ g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+ struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
index = cmdlist->last - 2 * (i + 1);
+
+ if (for_addr) {
+ /* check for the userptr buffer type flag (bit 31). */
+ if (cmdlist->data[index] & G2D_BUF_USERPTR) {
+ node->obj_type[i] = BUF_TYPE_USERPTR;
+ cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+ }
+ }
+
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
+
+ if (node->obj_type[i] != BUF_TYPE_USERPTR)
+ node->obj_type[i] = BUF_TYPE_GEM;
break;
default:
if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+ dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
- size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
- node->gem_nr = req->cmd_gem_nr;
- if (req->cmd_gem_nr) {
- struct drm_exynos_g2d_cmd *cmd_gem;
+ node->map_nr = req->cmd_buf_nr;
+ if (req->cmd_buf_nr) {
+ struct drm_exynos_g2d_cmd *cmd_buf;
- cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+ cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
- (void __user *)cmd_gem,
- sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+ (void __user *)cmd_buf,
+ sizeof(*cmd_buf) * req->cmd_buf_nr)) {
ret = -EFAULT;
goto err_free_event;
}
- cmdlist->last += req->cmd_gem_nr * 2;
+ cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
- ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+ ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
if (ret < 0)
goto err_unmap;
}
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
return 0;
err_unmap:
- g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+ g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
+ runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct g2d_data *g2d;
+ int ret;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ if (!is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ }
+
+ return ret;
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ if (!is_drm_iommu_supported(drm_dev))
+ return;
+
+ drm_iommu_detach_device(drm_dev, dev);
+}
+
static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->gem_list);
+ INIT_LIST_HEAD(&g2d_priv->userptr_list);
return 0;
}
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
return;
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ /*
+ * unmap all gem objects not yet completed.
+ *
+ * N.B. if the current process was terminated forcibly,
+ * there may still be commands in inuse_cmdlist, so unmap
+ * them here.
+ */
+ g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
+ }
mutex_unlock(&g2d->cmdlist_mutex);
- g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+ /* release all g2d_userptr in pool. */
+ g2d_userptr_free_all(drm_dev, g2d, file);
kfree(file_priv->g2d_priv);
}
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0)
- goto err_destroy_workqueue;
-
- g2d->gate_clk = clk_get(dev, "fimg2d");
+ g2d->gate_clk = devm_clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
- goto err_fini_cmdlist;
+ goto err_destroy_workqueue;
}
pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ g2d->max_pool = MAX_POOL;
+
platform_set_drvdata(pdev, g2d);
subdrv = &g2d->subdrv;
subdrv->dev = dev;
+ subdrv->probe = g2d_subdrv_probe;
+ subdrv->remove = g2d_subdrv_remove;
subdrv->open = g2d_open;
subdrv->close = g2d_close;
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
err_put_clk:
pm_runtime_disable(dev);
- clk_put(g2d->gate_clk);
-err_fini_cmdlist:
- g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
- clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664..d48183e7e05 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
- if (!IS_NONCONTIG_BUFFER(flags)) {
- if (size >= SZ_1M)
- return roundup(size, SECTION_SIZE);
- else if (size >= SZ_64K)
- return roundup(size, SZ_64K);
- else
- goto out;
- }
-out:
- return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct page *p, **pages;
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < npages; i++) {
- p = alloc_page(gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
- }
-
- return pages;
-
-fail:
- while (--i)
- __free_page(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages)
-{
- int npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- while (--npages >= 0)
- __free_page(pages[npages]);
+ /* TODO */
- drm_free_large(pages);
+ return roundup(size, PAGE_SIZE);
}
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
unsigned long pfn;
+ int i;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- if (!buf->pages)
- return -EINTR;
-
- pfn = page_to_pfn(buf->pages[page_offset++]);
- } else
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
- return vm_insert_mixed(vma, f_vaddr, pfn);
-}
+ if (!buf->sgt)
+ return -EINTR;
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- struct page **pages;
- unsigned int npages, i = 0;
- int ret;
-
- if (buf->pages) {
- DRM_DEBUG_KMS("already allocated.\n");
+ if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
- if (IS_ERR(pages)) {
- DRM_ERROR("failed to get pages.\n");
- return PTR_ERR(pages);
- }
-
- npages = obj->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
-
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- ret = -EFAULT;
- goto err1;
- }
-
sgl = buf->sgt->sgl;
-
- /* set all pages to sg list. */
- while (i < npages) {
- sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
- sg_dma_address(sgl) = page_to_phys(pages[i]);
- i++;
- sgl = sg_next(sgl);
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
}
- /* add some codes for UNCACHED type here. TODO */
-
- buf->pages = pages;
- return ret;
-err1:
- kfree(buf->sgt);
- buf->sgt = NULL;
-err:
- exynos_gem_put_pages(obj, pages);
- return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
- /*
- * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
- * allocated at gem fault handler.
- */
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- exynos_gem_put_pages(obj, buf->pages);
- buf->pages = NULL;
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
- /* add some codes for UNCACHED type here. TODO */
+ return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if (!buf->pages)
- return;
-
/*
* do not release memory region from exporter.
*
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
- exynos_drm_gem_put_pages(obj);
- else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- /*
- * allocate all pages as desired size if user wants to allocate
- * physically non-continuous memory.
- */
- if (flags & EXYNOS_BO_NONCONTIG) {
- ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
- } else {
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
}
return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return;
- }
-
drm_gem_object_unreference_unlocked(obj);
/*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+ struct file *filp)
+{
+ struct drm_file *file_priv;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ /* find current process's drm_file from filelist. */
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+ if (file_priv->filp == filp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return file_priv;
+ }
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ WARN_ON(1);
+
+ return ERR_PTR(-EFAULT);
+}
+
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+ struct drm_file *file_priv;
+ unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = obj;
+ vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+ /* restore it to driver's fops. */
+ filp->f_op = fops_get(drm_dev->driver->fops);
+
+ file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+ if (IS_ERR(file_priv))
+ return PTR_ERR(file_priv);
+
+ /* restore it to drm_file. */
+ filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
- vm_size = usize = vma->vm_end - vma->vm_start;
+ vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer contains information to physically continuous memory
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- int i = 0;
-
- if (!buffer->pages)
- return -EINVAL;
+ ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size,
+ &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
- vma->vm_flags |= VM_MIXEDMAP;
+	/*
+	 * take a reference to this mapping of the object; the reference
+	 * is dropped by the corresponding vm_close call.
+	 */
+ drm_gem_object_reference(obj);
- do {
- ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
- if (ret) {
- DRM_ERROR("failed to remap user space.\n");
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- } else {
- /*
- * get page frame number to physical memory to be mapped
- * to user space.
- */
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
- PAGE_SHIFT;
-
- DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
- vma->vm_page_prot)) {
- DRM_ERROR("failed to remap pfn range.\n");
- return -EAGAIN;
- }
- }
+ mutex_lock(&drm_dev->struct_mutex);
+ drm_vm_open_locked(drm_dev, vma);
+ mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- obj->filp->f_op = &exynos_drm_gem_fops;
- obj->filp->private_data = obj;
+	/*
+	 * Set the specific mmaper's fops; it will be restored to
+	 * dev->driver->fops by exynos_drm_gem_mmap_buffer.
+	 * This is used to call the specific mmaper temporarily.
+	 */
+ file_priv->filp->f_op = &exynos_drm_gem_fops;
- addr = vm_mmap(obj->filp, 0, args->size,
+	/*
+	 * Set the gem object to private_data so that the specific
+	 * mmaper can get it; it will be restored to the drm_file by
+	 * exynos_drm_gem_mmap_buffer.
+	 */
+ file_priv->filp->private_data = obj;
+
+ addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference_unlocked(obj);
- if (IS_ERR((void *)addr))
+ if (IS_ERR((void *)addr)) {
+ file_priv->filp->private_data = file_priv;
return PTR_ERR((void *)addr);
+ }
args->mapped = addr;
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (!vma_copy)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ kfree(vma);
+}
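
exynos_gem_get_vma()/exynos_gem_put_vma() snapshot a userspace VMA so the driver can keep referencing it after the faulting task's context is gone: the copy takes a vm_ops->open() call and a vm_file reference, and the put side releases both. A hedged sketch of the intended pairing (userptr and the surrounding code are hypothetical):

	struct vm_area_struct *vma, *vma_copy;

	/* look up the mapping under mmap_sem and take a private copy */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, userptr);
	vma_copy = vma ? exynos_gem_get_vma(vma) : NULL;
	up_read(&current->mm->mmap_sem);

	/* ... later, when the userptr object is released ... */
	exynos_gem_put_vma(vma_copy);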
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma)
+{
+ int get_npages;
+
+ /* the memory region mmaped with VM_PFNMAP. */
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ pages[i] = pfn_to_page(pfn);
+ }
+
+ if (i != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ get_npages = get_user_pages(current, current->mm, start,
+ npages, 1, 1, pages, NULL);
+ get_npages = max(get_npages, 0);
+ if (get_npages != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ while (get_npages)
+ put_page(pages[--get_npages]);
+ return -EFAULT;
+ }
+
+ return 0;
+}
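
exynos_gem_get_pages_from_userptr() pins the pages behind a user pointer, via follow_pfn() for VM_IO/VM_PFNMAP regions and get_user_pages() otherwise; both require mmap_sem to be held. A hedged usage sketch (the variables and surrounding function are hypothetical):

	unsigned int npages = size >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct page **pages;
	int ret;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pages)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, userptr);
	ret = vma ? exynos_gem_get_pages_from_userptr(userptr & PAGE_MASK,
						      npages, pages, vma)
		  : -EFAULT;
	up_read(&current->mm->mmap_sem);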
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma)
+{
+ if (!vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(pages[i]);
+
+ /*
+ * undo the reference we took when populating
+ * the table.
+ */
+ put_page(pages[i]);
+ }
+ }
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ int nents;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with dma.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+		return -EFAULT;	/* dma_map_sg() signals failure by returning 0 */
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
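
exynos_gem_map_sgt_with_dma()/exynos_gem_unmap_sgt_from_dma() bracket device access to a userptr-backed scatter-gather table. A hedged pairing sketch:

	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
	if (ret < 0)
		return ret;

	/* ... the engine reads/writes the buffer here ... */

	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);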
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+ ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
- DRM_ERROR("failed to map pages.\n");
+ DRM_ERROR("failed to map a buffer with user.\n");
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f7..f11f2afd5bf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,21 +35,27 @@
* exynos drm gem buffer structure.
*
* @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
* @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
+ * @pfnmap: indicates whether the memory region from userptr is mapped
+ *	with VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
+ unsigned long userptr;
dma_addr_t dma_addr;
- struct sg_table *sgt;
+ struct dma_attrs dma_attrs;
+ unsigned int write;
struct page **pages;
- unsigned long page_size;
+ struct sg_table *sgt;
unsigned long size;
+ bool pfnmap;
};
/*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
* or at framebuffer creation.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
+ * @vma: a pointer to vm_area.
* @flags: indicate memory type to allocated buffer and cache attruibute.
*
* P.S. this object would be transfered to user as kms_bo.handle so
@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
unsigned long size;
+ struct vm_area_struct *vma;
unsigned int flags;
};
@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/*
* put dma address from gem handle and this function could be used for
@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
*/
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* map user-space memory allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 00000000000..5639353d47b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1870 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaling/rotation and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ * GSC also supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
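
These three data paths correspond to the IPP command types this file handles later (IPP_CMD_M2M, IPP_CMD_WB, IPP_CMD_OUTPUT). A hedged sketch of an M2M request as the driver consumes it, using only the fields exercised by gsc_ippdrv_check_property (the sizes and rotation are illustrative):

	struct drm_exynos_ipp_property prop = {
		.cmd = IPP_CMD_M2M,	/* Memory ----> GSC H/W ----> Memory */
	};

	/* source: a full 1920x1080 frame, rotated by 90 degrees */
	prop.config[EXYNOS_DRM_OPS_SRC].degree = EXYNOS_DRM_DEGREE_90;
	prop.config[EXYNOS_DRM_OPS_SRC].pos =
		(struct drm_exynos_pos){ .x = 0, .y = 0, .w = 1920, .h = 1080 };
	prop.config[EXYNOS_DRM_OPS_SRC].sz =
		(struct drm_exynos_sz){ .hsize = 1920, .vsize = 1080 };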
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size with width and height.
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS 4
+#define GSC_MAX_SRC 4
+#define GSC_MAX_DST 16
+#define GSC_RESET_TIMEOUT 50
+#define GSC_BUF_STOP 1
+#define GSC_BUF_START 2
+#define GSC_REG_SZ 16
+#define GSC_WIDTH_ITU_709 1280
+#define GSC_SC_UP_MAX_RATIO 65536
+#define GSC_SC_DOWN_RATIO_7_8 74898
+#define GSC_SC_DOWN_RATIO_6_8 87381
+#define GSC_SC_DOWN_RATIO_5_8 104857
+#define GSC_SC_DOWN_RATIO_4_8 131072
+#define GSC_SC_DOWN_RATIO_3_8 174762
+#define GSC_SC_DOWN_RATIO_2_8 262144
+#define GSC_REFRESH_MIN 12
+#define GSC_REFRESH_MAX 60
+#define GSC_CROP_MAX 8192
+#define GSC_CROP_MIN 32
+#define GSC_SCALE_MAX 4224
+#define GSC_SCALE_MIN 32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR 16
+#define GSC_COEF_H_8T 8
+#define GSC_COEF_V_4T 4
+#define GSC_COEF_DEPTH 3
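
The GSC_SC_DOWN_RATIO_* values above are 16.16 fixed-point source/destination ratios, i.e. (src << 16) / dst: the 8:7 down-scale is 524288 / 7 = 74898, down to 262144 for 8:2. A one-line sanity sketch (hypothetical helper, not part of the driver):

	/* 16.16 fixed-point scale ratio: 65536 means 1:1 */
	static inline u32 gsc_fixed_ratio(u32 src, u32 dst)
	{
		return (src << 16) / dst;	/* gsc_fixed_ratio(8, 7) == 74898 */
	}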
+
+#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct gsc_context, ippdrv);
+#define gsc_read(offset) readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre-scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+ bool range;
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ unsigned long main_hratio;
+ unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see section 49.2 (features) of the user manual.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+ /* tile or rotation */
+ u32 tile_w;
+ u32 tile_h;
+ /* other cases */
+ u32 w;
+ u32 h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: whether rotation of the source is enabled.
+ * @suspended: suspended state, used for qos operations.
+ */
+struct gsc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *gsc_clk;
+ struct gsc_scaler sc;
+ int id;
+ int irq;
+ bool rotation;
+ bool suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { -1, 2, -6, 127, 7, -2, 1, 0 },
+ { -1, 4, -12, 125, 16, -5, 1, 0 },
+ { -1, 5, -15, 120, 25, -8, 2, 0 },
+ { -1, 6, -18, 114, 35, -10, 3, -1 },
+ { -1, 6, -20, 107, 46, -13, 4, -1 },
+ { -2, 7, -21, 99, 57, -16, 5, -1 },
+ { -1, 6, -20, 89, 68, -18, 5, -1 },
+ { -1, 6, -20, 79, 79, -20, 6, -1 },
+ { -1, 5, -18, 68, 89, -20, 6, -1 },
+ { -1, 5, -16, 57, 99, -21, 7, -2 },
+ { -1, 4, -13, 46, 107, -20, 6, -1 },
+ { -1, 3, -10, 35, 114, -18, 6, -1 },
+ { 0, 2, -8, 25, 120, -15, 5, -1 },
+ { 0, 1, -5, 16, 125, -12, 4, -1 },
+ { 0, 1, -2, 7, 127, -6, 2, -1 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 3, -8, 14, 111, 13, -8, 3, 0 },
+ { 2, -6, 7, 112, 21, -10, 3, -1 },
+ { 2, -4, 1, 110, 28, -12, 4, -1 },
+ { 1, -2, -3, 106, 36, -13, 4, -1 },
+ { 1, -1, -7, 103, 44, -15, 4, -1 },
+ { 1, 1, -11, 97, 53, -16, 4, -1 },
+ { 0, 2, -13, 91, 61, -16, 4, -1 },
+ { 0, 3, -15, 85, 69, -17, 4, -1 },
+ { 0, 3, -16, 77, 77, -16, 3, 0 },
+ { -1, 4, -17, 69, 85, -15, 3, 0 },
+ { -1, 4, -16, 61, 91, -13, 2, 0 },
+ { -1, 4, -16, 53, 97, -11, 1, 1 },
+ { -1, 4, -15, 44, 103, -7, -1, 1 },
+ { -1, 4, -13, 36, 106, -3, -2, 1 },
+ { -1, 4, -12, 28, 110, 1, -4, 2 },
+ { -1, 3, -10, 21, 112, 7, -6, 2 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 2, -11, 25, 96, 25, -11, 2, 0 },
+ { 2, -10, 19, 96, 31, -12, 2, 0 },
+ { 2, -9, 14, 94, 37, -12, 2, 0 },
+ { 2, -8, 10, 92, 43, -12, 1, 0 },
+ { 2, -7, 5, 90, 49, -12, 1, 0 },
+ { 2, -5, 1, 86, 55, -12, 0, 1 },
+ { 2, -4, -2, 82, 61, -11, -1, 1 },
+ { 1, -3, -5, 77, 67, -9, -1, 1 },
+ { 1, -2, -7, 72, 72, -7, -2, 1 },
+ { 1, -1, -9, 67, 77, -5, -3, 1 },
+ { 1, -1, -11, 61, 82, -2, -4, 2 },
+ { 1, 0, -12, 55, 86, 1, -5, 2 },
+ { 0, 1, -12, 49, 90, 5, -7, 2 },
+ { 0, 1, -12, 43, 92, 10, -8, 2 },
+ { 0, 2, -12, 37, 94, 14, -9, 2 },
+ { 0, 2, -12, 31, 96, 19, -10, 2 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { -1, -8, 33, 80, 33, -8, -1, 0 },
+ { -1, -8, 28, 80, 37, -7, -2, 1 },
+ { 0, -8, 24, 79, 41, -7, -2, 1 },
+ { 0, -8, 20, 78, 46, -6, -3, 1 },
+ { 0, -8, 16, 76, 50, -4, -3, 1 },
+ { 0, -7, 13, 74, 54, -3, -4, 1 },
+ { 1, -7, 10, 71, 58, -1, -5, 1 },
+ { 1, -6, 6, 68, 62, 1, -5, 1 },
+ { 1, -6, 4, 65, 65, 4, -6, 1 },
+ { 1, -5, 1, 62, 68, 6, -6, 1 },
+ { 1, -5, -1, 58, 71, 10, -7, 1 },
+ { 1, -4, -3, 54, 74, 13, -7, 0 },
+ { 1, -3, -4, 50, 76, 16, -8, 0 },
+ { 1, -3, -6, 46, 78, 20, -8, 0 },
+ { 1, -2, -7, 41, 79, 24, -8, 0 },
+ { 1, -2, -7, 37, 80, 28, -8, -1 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { -3, 0, 35, 64, 35, 0, -3, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 },
+ { -2, -3, 27, 63, 43, 4, -4, 0 },
+ { -2, -3, 24, 61, 46, 6, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 },
+ { -1, -4, 19, 59, 51, 9, -4, -1 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 },
+ { -1, -4, 9, 51, 59, 19, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 },
+ { 0, -4, 6, 46, 61, 24, -3, -2 },
+ { 0, -4, 4, 43, 63, 27, -3, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { -1, 8, 33, 48, 33, 8, -1, 0 },
+ { -1, 7, 31, 49, 35, 9, -1, -1 },
+ { -1, 6, 30, 49, 36, 10, -1, -1 },
+ { -1, 5, 28, 48, 38, 12, -1, -1 },
+ { -1, 4, 26, 48, 39, 13, 0, -1 },
+ { -1, 3, 24, 47, 41, 15, 0, -1 },
+ { -1, 2, 23, 47, 42, 16, 0, -1 },
+ { -1, 2, 21, 45, 43, 18, 1, -1 },
+ { -1, 1, 19, 45, 45, 19, 1, -1 },
+ { -1, 1, 18, 43, 45, 21, 2, -1 },
+ { -1, 0, 16, 42, 47, 23, 2, -1 },
+ { -1, 0, 15, 41, 47, 24, 3, -1 },
+ { -1, 0, 13, 39, 48, 26, 4, -1 },
+ { -1, -1, 12, 38, 48, 28, 5, -1 },
+ { -1, -1, 10, 36, 49, 30, 6, -1 },
+ { -1, -1, 9, 35, 49, 31, 7, -1 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 2, 13, 30, 38, 30, 13, 2, 0 },
+ { 2, 12, 29, 38, 30, 14, 3, 0 },
+ { 2, 11, 28, 38, 31, 15, 3, 0 },
+ { 2, 10, 26, 38, 32, 16, 4, 0 },
+ { 1, 10, 26, 37, 33, 17, 4, 0 },
+ { 1, 9, 24, 37, 34, 18, 5, 0 },
+ { 1, 8, 24, 37, 34, 19, 5, 0 },
+ { 1, 7, 22, 36, 35, 20, 6, 1 },
+ { 1, 6, 21, 36, 36, 21, 6, 1 },
+ { 1, 6, 20, 35, 36, 22, 7, 1 },
+ { 0, 5, 19, 34, 37, 24, 8, 1 },
+ { 0, 5, 18, 34, 37, 24, 9, 1 },
+ { 0, 4, 17, 33, 37, 26, 10, 1 },
+ { 0, 4, 16, 32, 38, 26, 10, 2 },
+ { 0, 3, 15, 31, 38, 28, 11, 2 },
+ { 0, 3, 14, 30, 38, 29, 12, 2 }
+ }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 128, 0, 0 },
+ { -4, 127, 5, 0 },
+ { -6, 124, 11, -1 },
+ { -8, 118, 19, -1 },
+ { -8, 111, 27, -2 },
+ { -8, 102, 37, -3 },
+ { -8, 92, 48, -4 },
+ { -7, 81, 59, -5 },
+ { -6, 70, 70, -6 },
+ { -5, 59, 81, -7 },
+ { -4, 48, 92, -8 },
+ { -3, 37, 102, -8 },
+ { -2, 27, 111, -8 },
+ { -1, 19, 118, -8 },
+ { -1, 11, 124, -6 },
+ { 0, 5, 127, -4 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 8, 112, 8, 0 },
+ { 4, 111, 14, -1 },
+ { 1, 109, 20, -2 },
+ { -2, 105, 27, -2 },
+ { -3, 100, 34, -3 },
+ { -5, 93, 43, -3 },
+ { -5, 86, 51, -4 },
+ { -5, 77, 60, -4 },
+ { -5, 69, 69, -5 },
+ { -4, 60, 77, -5 },
+ { -4, 51, 86, -5 },
+ { -3, 43, 93, -5 },
+ { -3, 34, 100, -3 },
+ { -2, 27, 105, -2 },
+ { -2, 20, 109, 1 },
+ { -1, 14, 111, 4 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 16, 96, 16, 0 },
+ { 12, 97, 21, -2 },
+ { 8, 96, 26, -2 },
+ { 5, 93, 32, -2 },
+ { 2, 89, 39, -2 },
+ { 0, 84, 46, -2 },
+ { -1, 79, 53, -3 },
+ { -2, 73, 59, -2 },
+ { -2, 66, 66, -2 },
+ { -2, 59, 73, -2 },
+ { -3, 53, 79, -1 },
+ { -2, 46, 84, 0 },
+ { -2, 39, 89, 2 },
+ { -2, 32, 93, 5 },
+ { -2, 26, 96, 8 },
+ { -2, 21, 97, 12 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { 22, 84, 22, 0 },
+ { 18, 85, 26, -1 },
+ { 14, 84, 31, -1 },
+ { 11, 82, 36, -1 },
+ { 8, 79, 42, -1 },
+ { 6, 76, 47, -1 },
+ { 4, 72, 52, 0 },
+ { 2, 68, 58, 0 },
+ { 1, 63, 63, 1 },
+ { 0, 58, 68, 2 },
+ { 0, 52, 72, 4 },
+ { -1, 47, 76, 6 },
+ { -1, 42, 79, 8 },
+ { -1, 36, 82, 11 },
+ { -1, 31, 84, 14 },
+ { -1, 26, 85, 18 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { 26, 76, 26, 0 },
+ { 22, 76, 30, 0 },
+ { 19, 75, 34, 0 },
+ { 16, 73, 38, 1 },
+ { 13, 71, 43, 1 },
+ { 10, 69, 47, 2 },
+ { 8, 66, 51, 3 },
+ { 6, 63, 55, 4 },
+ { 5, 59, 59, 5 },
+ { 4, 55, 63, 6 },
+ { 3, 51, 66, 8 },
+ { 2, 47, 69, 10 },
+ { 1, 43, 71, 13 },
+ { 1, 38, 73, 16 },
+ { 0, 34, 75, 19 },
+ { 0, 30, 76, 22 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { 29, 70, 29, 0 },
+ { 26, 68, 32, 2 },
+ { 23, 67, 36, 2 },
+ { 20, 66, 39, 3 },
+ { 17, 65, 43, 3 },
+ { 15, 63, 46, 4 },
+ { 12, 61, 50, 5 },
+ { 10, 58, 53, 7 },
+ { 8, 56, 56, 8 },
+ { 7, 53, 58, 10 },
+ { 5, 50, 61, 12 },
+ { 4, 46, 63, 15 },
+ { 3, 43, 65, 17 },
+ { 3, 39, 66, 20 },
+ { 2, 36, 67, 23 },
+ { 2, 32, 68, 26 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 32, 64, 32, 0 },
+ { 28, 63, 34, 3 },
+ { 25, 62, 37, 4 },
+ { 22, 62, 40, 4 },
+ { 19, 61, 43, 5 },
+ { 17, 59, 46, 6 },
+ { 15, 58, 48, 7 },
+ { 13, 55, 51, 9 },
+ { 11, 53, 53, 11 },
+ { 9, 51, 55, 13 },
+ { 7, 48, 58, 15 },
+ { 6, 46, 59, 17 },
+ { 5, 43, 61, 19 },
+ { 4, 40, 62, 22 },
+ { 4, 37, 62, 25 },
+ { 3, 34, 63, 28 }
+ }
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+ u32 cfg;
+ int count = GSC_RESET_TIMEOUT;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* s/w reset */
+ cfg = (GSC_SW_RESET_SRESET);
+ gsc_write(cfg, GSC_SW_RESET);
+
+ /* wait s/w reset complete */
+ while (count--) {
+ cfg = gsc_read(GSC_SW_RESET);
+ if (!cfg)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cfg) {
+ DRM_ERROR("failed to reset gsc h/w.\n");
+ return -EBUSY;
+ }
+
+ /* reset sequence */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_IN_BASE_ADDR_MASK |
+ GSC_IN_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_OUT_BASE_ADDR_MASK |
+ GSC_OUT_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+ u32 gscblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+ if (enable)
+ gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+ GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+ GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+ else
+ gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+ writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+ bool overflow, bool done)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, done);
+
+ cfg = gsc_read(GSC_IRQ);
+ cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+ if (enable)
+ cfg |= GSC_IRQ_ENABLE;
+ else
+ cfg &= ~GSC_IRQ_ENABLE;
+
+ if (overflow)
+ cfg &= ~GSC_IRQ_OR_MASK;
+ else
+ cfg |= GSC_IRQ_OR_MASK;
+
+ if (done)
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ else
+ cfg |= GSC_IRQ_FRMDONE_MASK;
+
+ gsc_write(cfg, GSC_IRQ);
+}
+
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+ GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_IN_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_IN_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_IN_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+ GSC_SRCIMG_OFFSET_Y(img_pos.y));
+ gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+ /* cropped size */
+ cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+ GSC_CROPPED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_CROPPED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_SRCIMG_SIZE);
+ cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+ GSC_SRCIMG_WIDTH_MASK);
+
+ cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+ GSC_SRCIMG_HEIGHT(sz->vsize));
+
+ gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709)
+ if (sc->range)
+ cfg |= GSC_IN_RGB_HD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_HD_NARROW;
+ else
+ if (sc->range)
+ cfg |= GSC_IN_RGB_SD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_SD_NARROW;
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ return -EINVAL;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static int gsc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EFAULT;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_SRC) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+ .set_fmt = gsc_src_set_fmt,
+ .set_transf = gsc_src_set_transf,
+ .set_size = gsc_src_set_size,
+ .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+ GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+ GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+ GSC_OUT_GLOBAL_ALPHA_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_OUT_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_OUT_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_OUT_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+ GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 8) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 4)
+ *ratio = 4;
+ else if (src >= dst * 2)
+ *ratio = 2;
+ else
+ *ratio = 1;
+
+ return 0;
+}
+
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+ if (hratio == 4 && vratio == 4)
+ *shfactor = 4;
+ else if ((hratio == 4 && vratio == 2) ||
+ (hratio == 2 && vratio == 4))
+ *shfactor = 3;
+ else if ((hratio == 4 && vratio == 1) ||
+ (hratio == 1 && vratio == 4) ||
+ (hratio == 2 && vratio == 2))
+ *shfactor = 2;
+ else if (hratio == 1 && vratio == 1)
+ *shfactor = 0;
+ else
+ *shfactor = 1;
+}
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+ u32 src_w, src_h, dst_w, dst_h;
+ int ret = 0;
+
+ src_w = src->w;
+ src_h = src->h;
+
+ if (ctx->rotation) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+ __func__, sc->pre_hratio, sc->pre_vratio);
+
+ sc->main_hratio = (src_w << 16) / dst_w;
+ sc->main_vratio = (src_h << 16) / dst_h;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+ &sc->pre_shfactor);
+
+ DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+ sc->pre_shfactor);
+
+ cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+ GSC_PRESC_H_RATIO(sc->pre_hratio) |
+ GSC_PRESC_V_RATIO(sc->pre_vratio));
+ gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+ return ret;
+}
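
A worked example for the prescaler math above, assuming a 1920x1080 source scaled down to 960x540 with no rotation: gsc_get_ratio_shift() yields pre_hratio = pre_vratio = 2 (src >= dst * 2 on both axes), gsc_get_prescaler_shfactor() maps (2, 2) to shfactor = 2, and main_hratio = (1920 << 16) / 960 = 131072, i.e. 2.0 in 16.16 fixed point (likewise for main_vratio).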
+
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_H_8T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(h_coef_8t[sc_ratio][i][j],
+ GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_V_4T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(v_coef_4t[sc_ratio][i][j],
+ GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_set_h_coef(ctx, sc->main_hratio);
+ cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+ gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+ gsc_set_v_coef(ctx, sc->main_vratio);
+ cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+ gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+ GSC_DSTIMG_OFFSET_Y(pos->y));
+ gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+ /* scaled size */
+ cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_SCALED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_DSTIMG_SIZE);
+ cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+ GSC_DSTIMG_WIDTH_MASK);
+ cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+ GSC_DSTIMG_HEIGHT(sz->vsize));
+ gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709)
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_HD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_HD_NARROW;
+ else
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_SD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_SD_NARROW;
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+ u32 cfg, i, buf_num = GSC_REG_SZ;
+ u32 mask = 0x00000001;
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ for (i = 0; i < GSC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num--;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
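
A quick worked reading of gsc_dst_get_buf_seq(): it returns the number of enqueued (unmasked) slots out of GSC_REG_SZ = 16, so with mask bits 0 and 1 set it reports 14. gsc_dst_set_buf_seq() below compares that count against GSC_BUF_START/GSC_BUF_STOP to decide when the frame-done interrupt is armed or disarmed.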
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+ gsc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+ gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EFAULT;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_DST) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+ .set_fmt = gsc_dst_set_fmt,
+ .set_transf = gsc_dst_set_transf,
+ .set_size = gsc_dst_set_size,
+ .set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->gsc_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->gsc_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_SRC;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_SRC; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_SRC) {
+ DRM_ERROR("failed to get in buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_DST;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_DST; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_DST) {
+ DRM_ERROR("failed to get out buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+ struct gsc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ u32 status;
+ int buf_id[EXYNOS_DRM_OPS_MAX];
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ status = gsc_read(GSC_IRQ);
+ if (status & GSC_IRQ_STATUS_OR_IRQ) {
+ dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ ctx->id, status);
+ return IRQ_NONE;
+ }
+
+ if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+		dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+
+ buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+ return IRQ_HANDLED;
+
+ buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+ buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+ buf_id[EXYNOS_DRM_OPS_SRC];
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = GSC_REFRESH_MIN;
+ prop_list->refresh_max = GSC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = GSC_CROP_MAX;
+ prop_list->crop_max.vsize = GSC_CROP_MAX;
+ prop_list->crop_min.hsize = GSC_CROP_MIN;
+ prop_list->crop_min.vsize = GSC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = GSC_SCALE_MAX;
+ prop_list->scale_max.vsize = GSC_SCALE_MAX;
+ prop_list->scale_min.hsize = GSC_SCALE_MIN;
+ prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!gsc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct gsc_scaler *sc = &ctx->sc;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ ret = gsc_sw_reset(ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to reset hardware.\n");
+ return ret;
+ }
+
+ /* scaler setting */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+ sc->range = true;
+
+ return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ u32 cfg;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ gsc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* enable one shot */
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+ GSC_ENABLE_CLK_GATE_MODE_MASK);
+ cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+ gsc_write(cfg, GSC_ENABLE);
+
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_WB:
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+ /* src local path */
+		cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_OUTPUT:
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst local path */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ ret = gsc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+		dev_err(dev, "failed to set prescaler.\n");
+ return ret;
+ }
+
+ gsc_set_scaler(ctx, &ctx->sc);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg |= GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+
+ return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* bypass */
+ break;
+ case IPP_CMD_WB:
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ gsc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+}
+
+static int __devinit gsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clock control */
+ ctx->gsc_clk = clk_get(dev, "gscl");
+ if (IS_ERR(ctx->gsc_clk)) {
+ dev_err(dev, "failed to get gsc clock.\n");
+ ret = PTR_ERR(ctx->gsc_clk);
+ goto err_ctx;
+ }
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ctx->regs_res) {
+ dev_err(dev, "failed to find registers.\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ ret = -ENOENT;
+ goto err_get_regs;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+ IRQF_ONESHOT, "drm_gsc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ goto err_get_regs;
+ }
+
+	/* context initialization */
+ ctx->id = pdev->id;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+ ippdrv->check_property = gsc_ippdrv_check_property;
+ ippdrv->reset = gsc_ippdrv_reset;
+ ippdrv->start = gsc_ippdrv_start;
+ ippdrv->stop = gsc_ippdrv_stop;
+ ret = gsc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm gsc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+err_get_regs:
+ devm_iounmap(dev, ctx->regs);
+err_clk:
+ clk_put(ctx->gsc_clk);
+err_ctx:
+ devm_kfree(dev, ctx);
+ return ret;
+}
+
+static int __devexit gsc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+ devm_iounmap(dev, ctx->regs);
+
+ clk_put(ctx->gsc_clk);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return gsc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+ .probe = gsc_probe,
+ .remove = __devexit_p(gsc_remove),
+ .driver = {
+ .name = "exynos-drm-gsc",
+ .owner = THIS_MODULE,
+ .pm = &gsc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 00000000000..b3c3bc618c0
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b4518..55793c46e3c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
struct drm_hdmi_context, subdrv);
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
/* Common hdmi subdrv needs to access the hdmi and mixer though context.
* These should be initialied by the repective drivers */
static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
bool enabled[MIXER_WIN_NR];
};
+int exynos_platform_device_hdmi_register(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ return -EEXIST;
+
+ exynos_drm_hdmi_pdev = platform_device_register_simple(
+ "exynos-drm-hdmi", -1, NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
+ return PTR_ERR(exynos_drm_hdmi_pdev);
+
+ return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
{
if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
}
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (mixer_ops && mixer_ops->wait_for_vblank)
+ mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
+ .wait_for_vblank = drm_hdmi_wait_for_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
.mode_set = drm_hdmi_mode_set,
.get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
ctx->enabled[win] = false;
}
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (mixer_ops && mixer_ops->wait_for_vblank)
- mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
.mode_set = drm_mixer_mode_set,
.commit = drm_mixer_commit,
.disable = drm_mixer_disable,
- .wait_for_vblank = drm_mixer_wait_for_vblank,
};
static struct exynos_drm_manager hdmi_manager = {
@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx->drm_dev = drm_dev;
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
return 0;
}
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct drm_hdmi_context *ctx;
+ struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+ ctx = get_ctx_from_subdrv(subdrv);
+
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
subdrv->dev = dev;
subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
+ subdrv->remove = hdmi_subdrv_remove;
platform_set_drvdata(pdev, subdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a05..fcc3093ec8f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {
struct exynos_mixer_ops {
/* manager */
+ int (*iommu_on)(void *ctx, bool enable);
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+ void (*wait_for_vblank)(void *ctx);
void (*dpms)(void *ctx, int mode);
/* overlay */
- void (*wait_for_vblank)(void *ctx);
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 00000000000..2482b7f9634
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct dma_iommu_mapping *mapping = NULL;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct device *dev = drm_dev->dev;
+
+ if (!priv->da_start)
+ priv->da_start = EXYNOS_DEV_ADDR_START;
+ if (!priv->da_space_size)
+ priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+ if (!priv->da_space_order)
+ priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+ priv->da_space_size,
+ priv->da_space_order);
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(dev, 0xffffffffu);
+ dev->archdata.mapping = mapping;
+
+ return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * If mapping->kref drops to zero, everything related to the iommu
+ * mapping is released.
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device- attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves
+ * to the iommu mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ if (!dev->archdata.mapping) {
+ DRM_ERROR("iommu_mapping is null.\n");
+ return -EFAULT;
+ }
+
+ subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+ sizeof(*subdrv_dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+ ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed iommu attach.\n");
+ return ret;
+ }
+
+ /*
+	 * Set dma_ops on the drm_device just once.
+	 *
+	 * The dma mapping api needs a device object, and the api is used
+	 * to allocate physical memory and map it through the iommu table.
+	 * Once the iommu attach has succeeded, the sub driver has dma_ops
+	 * for iommu, and all sub drivers share the same dma_ops.
+ */
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+ return 0;
+}
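+
+/*
+ * Typical usage from a sub driver (a minimal sketch; the function name
+ * and the bare-bones error handling are hypothetical):
+ *
+ *	static int example_subdrv_probe(struct drm_device *drm_dev,
+ *					struct device *dev)
+ *	{
+ *		if (is_drm_iommu_supported(drm_dev))
+ *			return drm_iommu_attach_device(drm_dev, dev);
+ *		return 0;
+ *	}
+ */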
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves
+ * from the iommu mapping.
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ if (!mapping || !mapping->domain)
+ return;
+
+ iommu_detach_device(mapping->domain, subdrv_dev);
+ drm_release_iommu_mapping(drm_dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 00000000000..18a0ca190b9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+#define EXYNOS_DEV_ADDR_ORDER 0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct device *dev = drm_dev->dev;
+
+ return dev->archdata.mapping ? true : false;
+#else
+ return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ return false;
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 00000000000..49eebe948ed
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports image
+ * scaler/rotator and input/output DMA operations, using FIMC, GSC,
+ * Rotator and so on. IPP is an integrated device driver for hardware
+ * blocks that share these capabilities.
+ */
+
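+/*
+ * Typical user space flow (a minimal sketch: fd, the field values and
+ * the DRM_IOCTL_EXYNOS_IPP_* request names are assumptions here; the
+ * handlers below implement the corresponding ioctls). Queue one src
+ * and one dst buffer, then start the command:
+ *
+ *	struct drm_exynos_ipp_property prop = { .cmd = IPP_CMD_M2M };
+ *	struct drm_exynos_ipp_queue_buf qbuf = { .buf_type = IPP_BUF_ENQUEUE };
+ *	struct drm_exynos_ipp_cmd_ctrl ctrl = { .ctrl = IPP_CTRL_PLAY };
+ *
+ *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
+ *	ctrl.prop_id = qbuf.prop_id = prop.prop_id;
+ *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
+ *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
+ *
+ * Completion of a destination buffer is then signalled through a
+ * DRM_EXYNOS_IPP_EVENT read from the DRM fd.
+ */
+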
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove the send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. need to check subdrv_open about multi-open.
+ * 7. implement power and sysmmu control in power_on.
+ */
+
+#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+ struct drm_pending_event base;
+ struct drm_exynos_ipp_event event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+ struct list_head list;
+ enum drm_exynos_ops_id ops_id;
+ u32 prop_id;
+ u32 buf_id;
+ struct drm_exynos_ipp_buf_info buf_info;
+ struct drm_file *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+ struct exynos_drm_subdrv subdrv;
+ struct mutex ipp_lock;
+ struct mutex prop_lock;
+ struct idr ipp_idr;
+ struct idr prop_idr;
+ struct workqueue_struct *event_workq;
+ struct workqueue_struct *cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_del(&ippdrv->drv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+ u32 *idp)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("failed to get idr.\n");
+ return -ENOMEM;
+ }
+
+	/* do the allocation under our mutex lock */
+ mutex_lock(lock);
+ ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+ mutex_unlock(lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ return ret;
+}
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+ void *obj;
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+ mutex_lock(lock);
+
+ /* find object using handle */
+ obj = idr_find(id_idr, id);
+ if (!obj) {
+ DRM_ERROR("failed to find object.\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_unlock(lock);
+
+ return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+ enum drm_exynos_ipp_cmd cmd)
+{
+ /*
+	 * Check the dedicated flag and, for WB/OUTPUT operations,
+	 * whether the device is in the powered-on state.
+ */
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return true;
+
+ return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 ipp_id = property->ipp_id;
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+ if (ipp_id) {
+ /* find ipp driver using idr */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ ipp_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("not found ipp%d driver.\n", ipp_id);
+ return ippdrv;
+ }
+
+ /*
+		 * WB and OUTPUT operations do not support multi-operation,
+		 * so mark the device dedicated in the set property ioctl;
+		 * the flag is cleared when the ipp driver finishes its work.
+ */
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+			DRM_ERROR("device already in use.\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+		 * This is necessary to find the correct device among the
+		 * ipp drivers: the drivers have different abilities, so
+		 * the property has to be checked.
+ */
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_ERROR("not support property.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ippdrv;
+ } else {
+ /*
+		 * Search the whole ipp driver list: the user application
+		 * did not set an ipp_id, so the ipp subsystem looks for a
+		 * suitable driver in the driver list.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_DEBUG_KMS("%s:used device.\n", __func__);
+ continue;
+ }
+
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_DEBUG_KMS("%s:not support property.\n",
+ __func__);
+ continue;
+ }
+
+ return ippdrv;
+ }
+
+		DRM_ERROR("no ipp driver supports the requested operations.\n");
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /*
+	 * Search for the ipp driver by prop_id handle. The ipp subsystem
+	 * looks drivers up by prop_id in several paths, e.g. the PAUSE
+	 * state, queue buf and command control.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+ count++, (int)ippdrv);
+
+ if (!list_empty(&ippdrv->cmd_list)) {
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+ if (c_node->property.prop_id == prop_id)
+ return ippdrv;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_prop_list *prop_list = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!prop_list) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+ if (!prop_list->ipp_id) {
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ count++;
+ /*
+		 * Report the ippdrv list count to the user application:
+		 * in a first step the application gets the driver count,
+		 * in a second step it gets each driver's capability by
+		 * ipp_id.
+ */
+ prop_list->count = count;
+ } else {
+ /*
+		 * Get the ippdrv capability by ipp_id. Some devices do
+		 * not support the wb or output interface, so user
+		 * applications detect the correct ipp driver using this
+		 * ioctl.
+ */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ prop_list->ipp_id);
+		if (IS_ERR(ippdrv)) {
+ DRM_ERROR("not found ipp%d driver.\n",
+ prop_list->ipp_id);
+ return -EINVAL;
+ }
+
+		*prop_list = *ippdrv->prop_list;
+ }
+
+ return 0;
+}
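+
+/*
+ * Illustrative discovery loop for the ioctl above (a sketch; fd and the
+ * DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY request name are assumptions): call
+ * once with ipp_id == 0 to learn the driver count, then query each
+ * driver by id (ids are allocated starting from 1):
+ *
+ *	struct drm_exynos_ipp_prop_list list = { .ipp_id = 0 };
+ *	u32 i;
+ *
+ *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &list);
+ *	for (i = 1; i <= list.count; i++) {
+ *		list.ipp_id = i;
+ *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &list);
+ *	}
+ */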
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+ int idx)
+{
+ struct drm_exynos_ipp_config *config = &property->config[idx];
+ struct drm_exynos_pos *pos = &config->pos;
+ struct drm_exynos_sz *sz = &config->sz;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+ __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+ DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ u32 prop_id = property->prop_id;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Find the command node in the ippdrv command list using prop_id
+	 * and return the property information set in that command node.
+ */
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ if ((c_node->property.prop_id == prop_id) &&
+ (c_node->state == IPP_STATE_STOP)) {
+ DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->cmd, (int)ippdrv);
+
+ c_node->property = *property;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("failed to search property.\n");
+
+ return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+ if (!cmd_work) {
+ DRM_ERROR("failed to alloc cmd_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+ return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+ struct drm_exynos_ipp_event_work *event_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+ if (!event_work) {
+ DRM_ERROR("failed to alloc event_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+ return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_property *property = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Log the property that the user application set; applications
+	 * set various properties.
+ */
+ for_each_ipp_ops(i)
+ ipp_print_property(property, i);
+
+ /*
+	 * The set property ioctl normally generates a new prop_id, but a
+	 * prop_id may already have been assigned by an earlier set
+	 * property call, e.g. in the PAUSE state. In that case find the
+	 * current prop_id and use it instead of allocating a new one.
+ */
+ if (property->prop_id) {
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+ return ipp_find_and_set_property(property);
+ }
+
+ /* find ipp driver using ipp id */
+ ippdrv = ipp_find_driver(ctx, property);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /* allocate command node */
+ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+ if (!c_node) {
+		DRM_ERROR("failed to allocate command node.\n");
+ return -ENOMEM;
+ }
+
+ /* create property id */
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+ &property->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_clear;
+ }
+
+ DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->prop_id, property->cmd, (int)ippdrv);
+
+	/* store property information and ippdrv in private data */
+ c_node->priv = priv;
+ c_node->property = *property;
+ c_node->state = IPP_STATE_IDLE;
+
+ c_node->start_work = ipp_create_cmd_work();
+	if (IS_ERR(c_node->start_work)) {
+		DRM_ERROR("failed to create start work.\n");
+		ret = PTR_ERR(c_node->start_work);
+		goto err_clear;
+ }
+
+ c_node->stop_work = ipp_create_cmd_work();
+	if (IS_ERR(c_node->stop_work)) {
+		DRM_ERROR("failed to create stop work.\n");
+		ret = PTR_ERR(c_node->stop_work);
+		goto err_free_start;
+ }
+
+ c_node->event_work = ipp_create_event_work();
+	if (IS_ERR(c_node->event_work)) {
+		DRM_ERROR("failed to create event work.\n");
+		ret = PTR_ERR(c_node->event_work);
+		goto err_free_stop;
+ }
+
+ mutex_init(&c_node->cmd_lock);
+ mutex_init(&c_node->mem_lock);
+ mutex_init(&c_node->event_lock);
+
+ init_completion(&c_node->start_complete);
+ init_completion(&c_node->stop_complete);
+
+ for_each_ipp_ops(i)
+ INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+ INIT_LIST_HEAD(&c_node->event_list);
+ list_splice_init(&priv->event_list, &c_node->event_list);
+ list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+ /* make dedicated state without m2m */
+ if (!ipp_is_m2m_cmd(property->cmd))
+ ippdrv->dedicated = true;
+
+ return 0;
+
+err_free_stop:
+ kfree(c_node->stop_work);
+err_free_start:
+ kfree(c_node->start_work);
+err_clear:
+ kfree(c_node);
+ return ret;
+}
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* delete list */
+ list_del(&c_node->list);
+
+ /* destroy mutex */
+ mutex_destroy(&c_node->cmd_lock);
+ mutex_destroy(&c_node->mem_lock);
+ mutex_destroy(&c_node->event_lock);
+
+ /* free command node */
+ kfree(c_node->start_work);
+ kfree(c_node->stop_work);
+ kfree(c_node->event_work);
+ kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+ i ? "dst" : "src");
+ continue;
+ }
+
+ /* find memory node entry */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+ i ? "dst" : "src", count[i], (int)m_node);
+ count[i]++;
+ }
+ }
+
+ DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+ min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+ max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+ /*
+	 * M2M operations need paired src/dst memory, so check the
+	 * minimum of the src and dst counts. Other commands do not use
+	 * paired memory, so use the maximum count.
+ */
+ if (ipp_is_m2m_cmd(property->cmd))
+ ret = min(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+ else
+ ret = max(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return ret;
+}
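+
+/*
+ * Worked example for ipp_check_mem_list() (illustrative counts): with
+ * three buffers queued on src and two on dst, an M2M command reports
+ * min(3, 2) = 2 since src/dst buffers are consumed in pairs; WB and
+ * OUTPUT commands fill only one of the two queues, so the maximum of
+ * the two counts is simply the depth of the populated queue.
+ */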
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+ /* source/destination memory list */
+ head = &c_node->mem_list[qbuf->ops_id];
+
+ /* find memory node from memory list */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+ __func__, count++, (int)m_node);
+
+ /* compare buffer id */
+ if (m_node->buf_id == qbuf->buf_id)
+ return m_node;
+ }
+
+ return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid queue node.\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* get operations callback */
+ ops = ippdrv->ops[m_node->ops_id];
+ if (!ops) {
+ DRM_ERROR("not support ops.\n");
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+ /* set address and enable irq */
+ if (ops->set_addr) {
+ ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+ m_node->buf_id, IPP_BUF_ENQUEUE);
+ if (ret) {
+ DRM_ERROR("failed to set addr.\n");
+ goto err_unlock;
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_get_mem_node(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_buf_info buf_info;
+ void *addr;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+ if (!m_node) {
+		DRM_ERROR("failed to allocate memory node.\n");
+ goto err_unlock;
+ }
+
+ /* clear base address for error handling */
+ memset(&buf_info, 0x0, sizeof(buf_info));
+
+ /* operations, buffer id */
+ m_node->ops_id = qbuf->ops_id;
+ m_node->prop_id = qbuf->prop_id;
+ m_node->buf_id = qbuf->buf_id;
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+ (int)m_node, qbuf->ops_id);
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+ qbuf->prop_id, m_node->buf_id);
+
+ for_each_ipp_planar(i) {
+ DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+ i, qbuf->handle[i]);
+
+ /* get dma address by handle */
+ if (qbuf->handle[i]) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev,
+ qbuf->handle[i], file);
+ if (IS_ERR(addr)) {
+ DRM_ERROR("failed to get addr.\n");
+ goto err_clear;
+ }
+
+ buf_info.handles[i] = qbuf->handle[i];
+ buf_info.base[i] = *(dma_addr_t *) addr;
+ DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+ __func__, i, buf_info.base[i],
+ (int)buf_info.handles[i]);
+ }
+ }
+
+ m_node->filp = file;
+ m_node->buf_info = buf_info;
+ list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+ mutex_unlock(&c_node->mem_lock);
+ return m_node;
+
+err_clear:
+ kfree(m_node);
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid dequeue node.\n");
+ return -EFAULT;
+ }
+
+ if (list_empty(&m_node->list)) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* put gem buffer */
+ for_each_ipp_planar(i) {
+ unsigned long handle = m_node->buf_info.handles[i];
+ if (handle)
+ exynos_drm_gem_put_dma_addr(drm_dev, handle,
+ m_node->filp);
+ }
+
+ /* delete list in queue */
+ list_del(&m_node->list);
+ kfree(m_node);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+ kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+ qbuf->ops_id, qbuf->buf_id);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+ if (!e) {
+ DRM_ERROR("failed to allocate event.\n");
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ return -ENOMEM;
+ }
+
+ /* make event */
+ e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = qbuf->user_data;
+ e->event.prop_id = qbuf->prop_id;
+ e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = ipp_free_event;
+ list_add_tail(&e->base.link, &c_node->event_list);
+
+ return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e, *te;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+ return;
+ }
+
+ list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+ DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+ __func__, count++, (int)e);
+
+ /*
+		 * A NULL qbuf means delete all events: stop operations
+		 * delete the whole event list. Otherwise delete only the
+		 * events with a matching buf id.
+ */
+ if (!qbuf) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ }
+
+ /* compare buffer id */
+ if (qbuf && (qbuf->buf_id ==
+ e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ return;
+ }
+ }
+}
+
+void ipp_handle_cmd_work(struct device *dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_work *cmd_work,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ cmd_work->ippdrv = ippdrv;
+ cmd_work->c_node = c_node;
+ queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_property *property;
+ struct exynos_drm_ipp_ops *ops;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EFAULT;
+ }
+
+ ops = ippdrv->ops[qbuf->ops_id];
+ if (!ops) {
+ DRM_ERROR("failed to get ops.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ if (c_node->state != IPP_STATE_START) {
+		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /*
+	 * If the destination buffer is set and the clock is enabled,
+	 * m2m operations need to start at queue_buf.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+ cmd_work->ctrl = IPP_CTRL_PLAY;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ } else {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+ /* delete list */
+ list_for_each_entry_safe(m_node, tm_node,
+ &c_node->mem_list[qbuf->ops_id], list) {
+ if (m_node->buf_id == qbuf->buf_id &&
+ m_node->ops_id == qbuf->ops_id)
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ }
+ }
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_queue_buf *qbuf = data;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!qbuf) {
+ DRM_ERROR("invalid buf parameter.\n");
+ return -EINVAL;
+ }
+
+ if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+ DRM_ERROR("invalid ops parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+ __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+ qbuf->buf_id, qbuf->buf_type);
+
+ /* find command node */
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ qbuf->prop_id);
+	if (IS_ERR(c_node)) {
+		DRM_ERROR("failed to get command node.\n");
+		return PTR_ERR(c_node);
+ }
+
+ /* buffer control */
+ switch (qbuf->buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* get memory node */
+ m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+ if (IS_ERR(m_node)) {
+ DRM_ERROR("failed to get m_node.\n");
+ return PTR_ERR(m_node);
+ }
+
+ /*
+		 * First get an event for the destination buffer, then,
+		 * in the M2M case, run with the destination buffer if
+		 * needed.
+ */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+ /* get event for destination buffer */
+ ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to get event.\n");
+ goto err_clean_node;
+ }
+
+ /*
+			 * The M2M case runs play control for streaming;
+			 * the other cases set the address and wait.
+ */
+ ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to run command.\n");
+ goto err_clean_node;
+ }
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ mutex_lock(&c_node->cmd_lock);
+
+ /* put event for destination buffer */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+ ipp_put_event(c_node, qbuf);
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+ mutex_unlock(&c_node->cmd_lock);
+ break;
+ default:
+ DRM_ERROR("invalid buffer control.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_clean_node:
+ DRM_ERROR("clean memory nodes.\n");
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+ return ret;
+}
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+ enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (ctrl != IPP_CTRL_PLAY) {
+ if (pm_runtime_suspended(dev)) {
+ DRM_ERROR("pm:runtime_suspended.\n");
+ goto err_status;
+ }
+ }
+
+ switch (ctrl) {
+ case IPP_CTRL_PLAY:
+ if (state != IPP_STATE_IDLE)
+ goto err_status;
+ break;
+ case IPP_CTRL_STOP:
+ if (state == IPP_STATE_STOP)
+ goto err_status;
+ break;
+ case IPP_CTRL_PAUSE:
+ if (state != IPP_STATE_START)
+ goto err_status;
+ break;
+ case IPP_CTRL_RESUME:
+ if (state != IPP_STATE_STOP)
+ goto err_status;
+ break;
+ default:
+ DRM_ERROR("invalid state.\n");
+ goto err_status;
+ }
+
+ return true;
+
+err_status:
+ DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+ return false;
+}
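+
+/*
+ * Summary of the transitions accepted above:
+ *
+ *	ctrl		required current state
+ *	PLAY		IDLE
+ *	STOP		anything but STOP
+ *	PAUSE		START
+ *	RESUME		STOP
+ *
+ * Every control except PLAY additionally requires the device not to be
+ * runtime suspended.
+ */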
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+ struct drm_exynos_ipp_cmd_node *c_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!cmd_ctrl) {
+ DRM_ERROR("invalid control parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return PTR_ERR(ippdrv);
+ }
+
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ cmd_ctrl->prop_id);
+	if (IS_ERR(c_node)) {
+		DRM_ERROR("failed to get command node.\n");
+		return PTR_ERR(c_node);
+ }
+
+ if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+ c_node->state)) {
+ DRM_ERROR("invalid state.\n");
+ return -EINVAL;
+ }
+
+ switch (cmd_ctrl->ctrl) {
+ case IPP_CTRL_PLAY:
+ if (pm_runtime_suspended(ippdrv->dev))
+ pm_runtime_get_sync(ippdrv->dev);
+ c_node->state = IPP_STATE_START;
+
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ case IPP_CTRL_STOP:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(300))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ break;
+ case IPP_CTRL_PAUSE:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ break;
+ case IPP_CTRL_RESUME:
+ c_node->state = IPP_STATE_START;
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ default:
+		DRM_ERROR("unsupported control command.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ return 0;
+}
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(
+ &exynos_drm_ippnb_list, val, v);
+}
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ bool swap = false;
+ int ret, i;
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* reset h/w block */
+ if (ippdrv->reset &&
+ ippdrv->reset(ippdrv->dev)) {
+ DRM_ERROR("failed to reset.\n");
+ return -EINVAL;
+ }
+
+ /* set source,destination operations */
+ for_each_ipp_ops(i) {
+ struct drm_exynos_ipp_config *config =
+ &property->config[i];
+
+ ops = ippdrv->ops[i];
+ if (!ops || !config) {
+ DRM_ERROR("not support ops and config.\n");
+ return -EINVAL;
+ }
+
+ /* set format */
+ if (ops->set_fmt) {
+ ret = ops->set_fmt(ippdrv->dev, config->fmt);
+ if (ret) {
+ DRM_ERROR("not support format.\n");
+ return ret;
+ }
+ }
+
+ /* set transform for rotation, flip */
+ if (ops->set_transf) {
+ ret = ops->set_transf(ippdrv->dev, config->degree,
+ config->flip, &swap);
+ if (ret) {
+				DRM_ERROR("not support transform.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* set size */
+ if (ops->set_size) {
+ ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+ &config->sz);
+ if (ret) {
+ DRM_ERROR("not support size.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* store command info in ippdrv */
+ ippdrv->cmd = c_node;
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* set current property in ippdrv */
+ ret = ipp_set_property(ippdrv, property);
+ if (ret) {
+ DRM_ERROR("failed to set property.\n");
+ ippdrv->cmd = NULL;
+ return ret;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("failed to get node.\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+ __func__, (int)m_node);
+
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+ /* start operations */
+ if (ippdrv->start) {
+ ret = ippdrv->start(ippdrv->dev, property->cmd);
+ if (ret) {
+ DRM_ERROR("failed to start ops.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret = 0, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* put event */
+ ipp_put_event(c_node, NULL);
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+ __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node,
+ head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node,
+ m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+err_clear:
+ /* stop operations */
+ if (ippdrv->stop)
+ ippdrv->stop(ippdrv->dev, property->cmd);
+
+ return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work =
+ (struct drm_exynos_ipp_cmd_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_property *property;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = cmd_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("invalid ippdrv list.\n");
+ return;
+ }
+
+ c_node = cmd_work->c_node;
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return;
+ }
+
+ mutex_lock(&c_node->cmd_lock);
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+
+ switch (cmd_work->ctrl) {
+ case IPP_CTRL_PLAY:
+ case IPP_CTRL_RESUME:
+ ret = ipp_start_property(ippdrv, c_node);
+ if (ret) {
+ DRM_ERROR("failed to start property:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+
+ /*
+ * The M2M case runs a single operation over multiple queued
+ * buffers, so it has to wait for the data transfer to
+ * complete before the next command can be scheduled.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ if (!wait_for_completion_timeout
+ (&c_node->start_complete, msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout event:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+ }
+ break;
+ case IPP_CTRL_STOP:
+ case IPP_CTRL_PAUSE:
+ ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+ c_node);
+ if (ret) {
+ DRM_ERROR("failed to stop property.\n");
+ goto err_unlock;
+ }
+
+ complete(&c_node->stop_complete);
+ break;
+ default:
+ DRM_ERROR("unknown control type\n");
+ break;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+ mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_queue_buf qbuf;
+ struct drm_exynos_ipp_send_event *e;
+ struct list_head *head;
+ struct timeval now;
+ unsigned long flags;
+ u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+ int ret, i;
+
+ for_each_ipp_ops(i)
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", buf_id[i]);
+
+ if (!drm_dev) {
+ DRM_ERROR("failed to get drm_dev.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[i] = m_node->buf_id;
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", tbuf_id[i]);
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ }
+ break;
+ case IPP_CMD_WB:
+ /* clear buf for finding */
+ memset(&qbuf, 0x0, sizeof(qbuf));
+ qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+ qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+ /* get memory node entry */
+ m_node = ipp_find_mem_node(c_node, &qbuf);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+ DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+ tbuf_id[1], buf_id[1], property->prop_id);
+
+ /*
+ * The command node keeps an event list for destination buffers.
+ * When a destination buffer is enqueued on the memory list, an
+ * event is created and linked to the tail of the event list, so
+ * the first event always belongs to the first enqueued buffer.
+ */
+ e = list_first_entry(&c_node->event_list,
+ struct drm_exynos_ipp_send_event, base.link);
+
+ if (!e) {
+ DRM_ERROR("empty event.\n");
+ return -EINVAL;
+ }
+
+ do_gettimeofday(&now);
+ DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
+ , __func__, now.tv_sec, now.tv_usec);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.prop_id = property->prop_id;
+
+ /* set source/destination buffer ids */
+ for_each_ipp_ops(i)
+ e->event.buf_id[i] = tbuf_id[i];
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+ property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+ return 0;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+ struct drm_exynos_ipp_event_work *event_work =
+ (struct drm_exynos_ipp_event_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret;
+
+ if (!event_work) {
+ DRM_ERROR("failed to get event_work.\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+ event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+ ippdrv = event_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return;
+ }
+
+ c_node = ippdrv->cmd;
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ return;
+ }
+
+ /*
+ * IPP synchronizes the command thread with this event thread.
+ * If userland closed the device immediately, the command node is
+ * no longer in the START state; skip sending the event but still
+ * signal completion below so the command thread does not block.
+ */
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+ __func__, c_node->state, c_node->property.prop_id);
+ goto err_completion;
+ }
+
+ mutex_lock(&c_node->event_lock);
+
+ ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+ if (ret) {
+ DRM_ERROR("failed to send event.\n");
+ goto err_completion;
+ }
+
+err_completion:
+ if (ipp_is_m2m_cmd(c_node->property.cmd))
+ complete(&c_node->start_complete);
+
+ mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret, count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ ippdrv->drm_dev = drm_dev;
+
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+ &ippdrv->ipp_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_idr;
+ }
+
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+ count++, (int)ippdrv, ippdrv->ipp_id);
+
+ if (ippdrv->ipp_id == 0) {
+ DRM_ERROR("failed to get ipp_id[%d]\n",
+ ippdrv->ipp_id);
+ ret = -EINVAL;
+ goto err_idr;
+ }
+
+ /* store parent device for node */
+ ippdrv->parent_dev = dev;
+
+ /* store event work queue and handler */
+ ippdrv->event_workq = ctx->event_workq;
+ ippdrv->sched_event = ipp_sched_event;
+ INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+ if (is_drm_iommu_supported(drm_dev)) {
+ ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+ if (ret) {
+ DRM_ERROR("failed to activate iommu\n");
+ goto err_iommu;
+ }
+ }
+ }
+
+ return 0;
+
+err_iommu:
+ /* get ipp driver entry */
+ list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+ return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+ ippdrv->drm_dev = NULL;
+ exynos_drm_ippdrv_unregister(ippdrv);
+ }
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("failed to allocate priv.\n");
+ return -ENOMEM;
+ }
+ priv->dev = dev;
+ file_priv->ipp_priv = priv;
+
+ INIT_LIST_HEAD(&priv->event_list);
+
+ DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+ return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ goto err_clear;
+ }
+
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (list_empty(&ippdrv->cmd_list))
+ continue;
+
+ list_for_each_entry_safe(c_node, tc_node,
+ &ippdrv->cmd_list, list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+ __func__, count++, (int)ippdrv);
+
+ if (c_node->priv == priv) {
+ /*
+ * Userland terminated abnormally (e.g. the process was
+ * killed) and closed the file without issuing a stop
+ * command, so stop the operation here on its behalf.
+ */
+ if (c_node->state == IPP_STATE_START) {
+ ipp_stop_property(drm_dev, ippdrv,
+ c_node);
+ c_node->state = IPP_STATE_STOP;
+ }
+
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ }
+ }
+ }
+
+err_clear:
+ kfree(priv);
+ return;
+}
+
+static int __devinit ipp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipp_context *ctx;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_init(&ctx->ipp_lock);
+ mutex_init(&ctx->prop_lock);
+
+ idr_init(&ctx->ipp_idr);
+ idr_init(&ctx->prop_idr);
+
+ /*
+ * Create a single-threaded workqueue for IPP events.
+ * IPP drivers queue their event_work here, and this event
+ * thread then delivers the events to the user process.
+ */
+ ctx->event_workq = create_singlethread_workqueue("ipp_event");
+ if (!ctx->event_workq) {
+ dev_err(dev, "failed to create event workqueue\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+ /*
+ * Create a single-threaded workqueue for IPP commands.
+ * The user process creates a command node with the set-property
+ * ioctl and queues its start_work here; this command thread then
+ * starts the property.
+ */
+ ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+ if (!ctx->cmd_workq) {
+ dev_err(dev, "failed to create cmd workqueue\n");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ /* set sub driver information */
+ subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->probe = ipp_subdrv_probe;
+ subdrv->remove = ipp_subdrv_remove;
+ subdrv->open = ipp_subdrv_open;
+ subdrv->close = ipp_subdrv_close;
+
+ platform_set_drvdata(pdev, ctx);
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ DRM_ERROR("failed to register drm ipp device.\n");
+ goto err_cmd_workq;
+ }
+
+ dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+ return 0;
+
+err_cmd_workq:
+ destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+ destroy_workqueue(ctx->event_workq);
+err_clear:
+ kfree(ctx);
+ return ret;
+}
+
+static int __devexit ipp_remove(struct platform_device *pdev)
+{
+ struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* unregister sub driver */
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+ /* remove and destroy ipp idr */
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+
+ mutex_destroy(&ctx->ipp_lock);
+ mutex_destroy(&ctx->prop_lock);
+
+ /* destroy command, event work queue */
+ destroy_workqueue(ctx->cmd_workq);
+ destroy_workqueue(ctx->event_workq);
+
+ kfree(ctx);
+
+ return 0;
+}
+
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
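+ /*
+ * Placeholder: IPP-level power control is a no-op for now; actual
+ * clock gating is handled per device (e.g. rotator_clk_ctrl() in
+ * exynos_drm_rotator.c).
+ */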
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return ipp_power_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+ SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+ .probe = ipp_probe,
+ .remove = __devexit_p(ipp_remove),
+ .driver = {
+ .name = "exynos-drm-ipp",
+ .owner = THIS_MODULE,
+ .pm = &ipp_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 00000000000..28ffac95386
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
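+
+/*
+ * Usage sketch: for_each_ipp_ops(i) iterates i over the operation
+ * sides (EXYNOS_DRM_OPS_SRC, EXYNOS_DRM_OPS_DST), e.g.
+ *
+ *	for_each_ipp_ops(i)
+ *		head = &c_node->mem_list[i];
+ *
+ * for_each_ipp_planar(i) iterates over the Y/Cb/Cr planes likewise.
+ */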
+
+#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+ IPP_STATE_IDLE,
+ IPP_STATE_START,
+ IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ enum drm_exynos_ipp_ctrl ctrl;
+};
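+
+/*
+ * Note: @work must remain the first member so ipp_sched_cmd() can cast
+ * the work_struct pointer it receives back to this structure.
+ */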
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head to source/destination memory queue information.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+ struct exynos_drm_ipp_private *priv;
+ struct list_head list;
+ struct list_head event_list;
+ struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
+ struct mutex cmd_lock;
+ struct mutex mem_lock;
+ struct mutex event_lock;
+ struct completion start_complete;
+ struct completion stop_complete;
+ struct drm_exynos_ipp_property property;
+ struct drm_exynos_ipp_cmd_work *start_work;
+ struct drm_exynos_ipp_cmd_work *stop_work;
+ struct drm_exynos_ipp_event_work *event_work;
+ enum drm_exynos_ipp_state state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: Y, Cb, Cr gem handles for each plane.
+ * @base: Y, Cb, Cr dma address for each plane.
+ */
+struct drm_exynos_ipp_buf_info {
+ unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
+ dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of wb setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: refresh rate in Hz.
+ */
+struct drm_exynos_ipp_set_wb {
+ __u32 enable;
+ __u32 refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 buf_id[EXYNOS_DRM_OPS_MAX];
+};
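+
+/*
+ * As with the command work, @work must remain the first member here;
+ * drivers cast this structure to a work_struct when queueing it (see
+ * rotator_irq_handler()).
+ */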
+
+/*
+ * A structure of source/destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+ int (*set_fmt)(struct device *dev, u32 fmt);
+ int (*set_transf)(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap);
+ int (*set_size)(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+ int (*set_addr)(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type);
+};
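+
+/*
+ * Each hardware driver provides one ops table per side and may leave
+ * callbacks it does not implement NULL; the core checks each pointer
+ * before calling it. The rotator, for instance, fills set_fmt/set_size/
+ * set_addr for the source and set_transf/set_size/set_addr for the
+ * destination.
+ */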
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @cmd: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property informations of current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+ struct list_head drv_list;
+ struct device *parent_dev;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ u32 ipp_id;
+ bool dedicated;
+ struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
+ struct workqueue_struct *event_workq;
+ struct drm_exynos_ipp_cmd_node *cmd;
+ struct list_head cmd_list;
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ int (*check_property)(struct device *dev,
+ struct drm_exynos_ipp_property *property);
+ int (*reset)(struct device *dev);
+ int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*sched_event)(struct work_struct *work);
+};
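+
+/*
+ * Minimal wiring sketch (cf. rotator_probe()): a driver fills @dev,
+ * @ops[EXYNOS_DRM_OPS_SRC/DST], @check_property and @start, then calls
+ * exynos_drm_ippdrv_register().
+ */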
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 862ca1eb210..83efc662d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
* CRTC ----------------
* ^ start ^ end
*
- * There are six cases from a to b.
+ * There are six cases from a to f.
*
* <----- SCREEN ----->
* 0 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
}
overlay->dma_addr[i] = buffer->dma_addr;
- overlay->vaddr[i] = buffer->kvaddr;
- DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
- i, (unsigned long)overlay->vaddr[i],
- (unsigned long)overlay->dma_addr[i]);
+ DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->dma_addr[i]);
}
actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_x < 0) {
if (actual_w)
src_x -= crtc_x;
- else
- src_x += crtc_w;
crtc_x = 0;
}
if (crtc_y < 0) {
if (actual_h)
src_y -= crtc_y;
- else
- src_y += crtc_h;
crtc_y = 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 00000000000..1c2366083c7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * The rotator supports image crop/rotation and input/output DMA
+ * operations: the input DMA reads image data from memory and the
+ * output DMA writes the result back to memory.
+ *
+ * M2M operation: crop/scale/rotation/csc and so on.
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size (width, height).
+ * 4. need to add supported list in prop_list.
+ */
+
+#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct rot_context, ippdrv)
+#define rot_read(offset) readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
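+
+/*
+ * rot_read()/rot_write() expect a 'struct rot_context *rot' in scope,
+ * e.g.:
+ *
+ *	val = rot_read(ROT_CONTROL);
+ *	rot_write(val | ROT_CONTROL_START, ROT_CONTROL);
+ */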
+
+enum rot_irq_status {
+ ROT_IRQ_STATUS_COMPLETE = 8,
+ ROT_IRQ_STATUS_ILLEGAL = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+ u32 min_w;
+ u32 min_h;
+ u32 max_w;
+ u32 max_h;
+ u32 align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+ struct rot_limit ycbcr420_2p;
+ struct rot_limit rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct clk *clock;
+ struct rot_limit_table *limit_tbl;
+ int irq;
+ int cur_buf_id[EXYNOS_DRM_OPS_MAX];
+ bool suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+ u32 val = rot_read(ROT_CONFIG);
+
+ if (enable)
+ val |= ROT_CONFIG_IRQ;
+ else
+ val &= ~ROT_CONFIG_IRQ;
+
+ rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_CONTROL);
+
+ val &= ROT_CONTROL_FMT_MASK;
+
+ return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_STATUS);
+
+ val = ROT_STATUS_IRQ(val);
+
+ if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+ return ROT_IRQ_STATUS_COMPLETE;
+
+ return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+ struct rot_context *rot = arg;
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+ enum rot_irq_status irq_status;
+ u32 val;
+
+ /* Get execution result */
+ irq_status = rotator_reg_get_irq_status(rot);
+
+ /* clear status */
+ val = rot_read(ROT_STATUS);
+ val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+ rot_write(val, ROT_STATUS);
+
+ if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ } else {
+ DRM_ERROR("the SFR is set illegally\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+ u32 *vsize)
+{
+ struct rot_limit_table *limit_tbl = rot->limit_tbl;
+ struct rot_limit *limit;
+ u32 mask, val;
+
+ /* Get size limit */
+ if (fmt == ROT_CONTROL_FMT_RGB888)
+ limit = &limit_tbl->rgb888;
+ else
+ limit = &limit_tbl->ycbcr420_2p;
+
+ /* Get mask for rounding to nearest aligned val */
+ mask = ~((1 << limit->align) - 1);
+
+ /* Set aligned width */
+ val = ROT_ALIGN(*hsize, limit->align, mask);
+ if (val < limit->min_w)
+ *hsize = ROT_MIN(limit->min_w, mask);
+ else if (val > limit->max_w)
+ *hsize = ROT_MAX(limit->max_w, mask);
+ else
+ *hsize = val;
+
+ /* Set aligned height */
+ val = ROT_ALIGN(*vsize, limit->align, mask);
+ if (val < limit->min_h)
+ *vsize = ROT_MIN(limit->min_h, mask);
+ else if (val > limit->max_h)
+ *vsize = ROT_MAX(limit->max_h, mask);
+ else
+ *vsize = val;
+}
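+
+/*
+ * Worked example (assuming ROT_ALIGN()/ROT_MIN()/ROT_MAX() from
+ * regs-rotator.h round to the mask-aligned value): RGB888 has align 2,
+ * so mask = ~0x3 and sizes snap to multiples of 4; YCbCr420 2P has
+ * align 3, so mask = ~0x7 and sizes snap to multiples of 8, clamped
+ * to the min/max limits of the format.
+ */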
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FMT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ val |= ROT_CONTROL_FMT_YCBCR420_2P;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= ROT_CONTROL_FMT_RGB888;
+ break;
+ default:
+ DRM_ERROR("invalid image format\n");
+ return -EINVAL;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+ (fmt == ROT_CONTROL_FMT_RGB888))
+ return true;
+
+ return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 fmt, hsize, vsize;
+ u32 val;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_SRC_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_SRC_CROP_POS);
+ val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+ rot_write(val, ROT_SRC_CROP_SIZE);
+
+ return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+
+ val = rot_read(ROT_SRC_BUF_SIZE);
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ /* Set transform configuration */
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FLIP_MASK;
+
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ val |= ROT_CONTROL_FLIP_VERTICAL;
+ break;
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
+ break;
+ default:
+ /* Flip None */
+ break;
+ }
+
+ val &= ~ROT_CONTROL_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ val |= ROT_CONTROL_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ val |= ROT_CONTROL_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ val |= ROT_CONTROL_ROT_270;
+ break;
+ default:
+ /* Rotation 0 Degree */
+ break;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ /* Check degree for setting buffer size swap */
+ if ((degree == EXYNOS_DRM_DEGREE_90) ||
+ (degree == EXYNOS_DRM_DEGREE_270))
+ *swap = true;
+ else
+ *swap = false;
+
+ return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val, fmt, hsize, vsize;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_DST_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_DST_CROP_POS);
+
+ return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+ /* Get buf size */
+ val = rot_read(ROT_DST_BUF_SIZE);
+
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_DST_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+ .set_fmt = rotator_src_set_fmt,
+ .set_size = rotator_src_set_size,
+ .set_addr = rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+ .set_transf = rotator_dst_set_transf,
+ .set_size = rotator_dst_set_size,
+ .set_addr = rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 0;
+ prop_list->crop = 0;
+ prop_list->scale = 0;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:not support format\n", __func__);
+ return false;
+ }
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct drm_exynos_ipp_config *src_config =
+ &property->config[EXYNOS_DRM_OPS_SRC];
+ struct drm_exynos_ipp_config *dst_config =
+ &property->config[EXYNOS_DRM_OPS_DST];
+ struct drm_exynos_pos *src_pos = &src_config->pos;
+ struct drm_exynos_pos *dst_pos = &dst_config->pos;
+ struct drm_exynos_sz *src_sz = &src_config->sz;
+ struct drm_exynos_sz *dst_sz = &dst_config->sz;
+ bool swap = false;
+
+ /* Check format configuration */
+ if (src_config->fmt != dst_config->fmt) {
+ DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_fmt(dst_config->fmt)) {
+ DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check transform configuration */
+ if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+ DRM_DEBUG_KMS("%s:not support source-side rotation\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (dst_config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
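+ /* fall through */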
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+ return -EINVAL;
+ }
+
+ if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+ DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_flip(dst_config->flip)) {
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check size configuration */
+ if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+ (src_pos->y + src_pos->h > src_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+ return -EINVAL;
+ }
+
+ if (swap) {
+ if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+ (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+ (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ if (rot->suspended) {
+ DRM_ERROR("suspended state\n");
+ return -EPERM;
+ }
+
+ if (cmd != IPP_CMD_M2M) {
+ DRM_ERROR("not support cmd: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set interrupt enable */
+ rotator_reg_set_irq(rot, true);
+
+ val = rot_read(ROT_CONTROL);
+ val |= ROT_CONTROL_START;
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static int __devinit rotator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+ if (!rot) {
+ dev_err(dev, "failed to allocate rot\n");
+ return -ENOMEM;
+ }
+
+ rot->limit_tbl = (struct rot_limit_table *)
+ platform_get_device_id(pdev)->driver_data;
+
+ rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rot->regs_res) {
+ dev_err(dev, "failed to find registers\n");
+ ret = -ENOENT;
+ goto err_get_resource;
+ }
+
+ rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
+ if (!rot->regs) {
+ dev_err(dev, "failed to map register\n");
+ ret = -ENXIO;
+ goto err_get_resource;
+ }
+
+ rot->irq = platform_get_irq(pdev, 0);
+ if (rot->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ ret = rot->irq;
+ goto err_get_irq;
+ }
+
+ ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+ IRQF_ONESHOT, "drm_rotator", rot);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_get_irq;
+ }
+
+ rot->clock = clk_get(dev, "rotator");
+ if (IS_ERR_OR_NULL(rot->clock)) {
+ dev_err(dev, "failed to get clock\n");
+ ret = PTR_ERR(rot->clock);
+ goto err_clk_get;
+ }
+
+ pm_runtime_enable(dev);
+
+ ippdrv = &rot->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ ippdrv->check_property = rotator_ippdrv_check_property;
+ ippdrv->start = rotator_ippdrv_start;
+ ret = rotator_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_ippdrv_register;
+ }
+
+ DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+ platform_set_drvdata(pdev, rot);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm rotator device\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(dev, "The exynos rotator is probed successfully\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+ clk_put(rot->clock);
+err_clk_get:
+ free_irq(rot->irq, rot);
+err_get_irq:
+ devm_iounmap(dev, rot->regs);
+err_get_resource:
+ devm_kfree(dev, rot);
+ return ret;
+}
+
+static int __devexit rotator_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot = dev_get_drvdata(dev);
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+
+ pm_runtime_disable(dev);
+ clk_put(rot->clock);
+
+ free_irq(rot->irq, rot);
+ devm_iounmap(dev, rot->regs);
+
+ devm_kfree(dev, rot);
+
+ return 0;
+}
+
+struct rot_limit_table rot_limit_tbl = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 2,
+ },
+};
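+
+/*
+ * @align is the log2 of the required pixel alignment: 3 means 8-pixel
+ * alignment for YCbCr420 2P and 2 means 4-pixel alignment for RGB888
+ * (see the mask computation in rotator_align_size()).
+ */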
+
+struct platform_device_id rotator_driver_ids[] = {
+ {
+ .name = "exynos-rot",
+ .driver_data = (unsigned long)&rot_limit_tbl,
+ },
+ {},
+};
+
+static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (enable) {
+ clk_enable(rot->clock);
+ rot->suspended = false;
+ } else {
+ clk_disable(rot->clock);
+ rot->suspended = true;
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return rotator_clk_ctrl(rot, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return rotator_clk_ctrl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+ NULL)
+};
+
+struct platform_driver rotator_driver = {
+ .probe = rotator_probe,
+ .remove = __devexit_p(rotator_remove),
+ .id_table = rotator_driver_ids,
+ .driver = {
+ .name = "exynos-rot",
+ .owner = THIS_MODULE,
+ .pm = &rotator_pm_ops,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 00000000000..a2d7a14a52b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_ROTATOR_H_
+#define _EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f..99bfc38dfaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
-
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
+ drm_vblank_put(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a..2c46b6c0b82 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,6 +50,29 @@
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION 0x02
+#define HDMI_AVI_LENGTH 0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION 0x01
+#define HDMI_AUI_LENGTH 0x0A
+
+/* HDMI packet types used to configure the outgoing AUI and AVI infoframes */
+enum HDMI_PACKET_TYPE {
+ /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
+ /* InfoFrame packet type */
+ HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+ /* Vendor-Specific InfoFrame */
+ HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+ /* Auxiliary Video information InfoFrame */
+ HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+ /* Audio information InfoFrame */
+ HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
struct mutex hdmi_mutex;
void __iomem *regs;
+ void *parent_ctx;
int external_irq;
int internal_irq;
@@ -84,7 +108,6 @@ struct hdmi_context {
int cur_conf;
struct hdmi_resources res;
- void *parent_ctx;
int hpd_gpio;
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_v13_preset_conf *conf;
};
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
};
static const struct hdmi_v13_conf hdmi_v13_confs[] = {
- { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
- { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
- { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p50 },
- { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
- { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p60 },
+ { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+ &hdmi_v13_conf_480p },
+ { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i50 },
+ { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i60 },
+ { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p60 },
};
/* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_preset_conf *conf;
};
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
};
static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+ { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+ { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+ { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
+struct hdmi_infoframe {
+ enum HDMI_PACKET_TYPE type;
+ u8 ver;
+ u8 len;
+};
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
return hdmi_v14_conf_index(mode);
}
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+ u32 start, u8 len, u32 hdr_sum)
+{
+ int i;
+
+ /*
+ * hdr_sum: header0 + header1 + header2
+ * start: start address of packet byte1
+ * len: packet bytes - 1
+ */
+ for (i = 0; i < len; ++i)
+ hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+ /* return 2's complement of 8 bit hdr_sum */
+ return (u8)(~(hdr_sum & 0xff) + 1);
+}
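+
+/*
+ * Example: for the AVI infoframe the header sum is
+ * 0x82 + 0x02 + 0x0D = 0x91, and the returned checksum is the byte
+ * that makes (headers + payload + checksum) == 0 (mod 256), i.e.
+ * 0x100 - ((0x91 + payload sum) & 0xff).
+ */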
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+ struct hdmi_infoframe *infoframe)
+{
+ u32 hdr_sum;
+ u8 chksum;
+ u32 aspect_ratio;
+ u32 mod;
+ u32 vic;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+ if (hdata->dvi_mode) {
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+ HDMI_VSI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+ HDMI_AVI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+ return;
+ }
+
+ switch (infoframe->type) {
+ case HDMI_PACKET_TYPE_AVI:
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+ /* Output format hardcoded to zero: RGB/YCbCr selection */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+ AVI_ACTIVE_FORMAT_VALID |
+ AVI_UNDERSCANNED_DISPLAY_VALID);
+
+ aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+ AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+ if (hdata->type == HDMI_TYPE13)
+ vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
+ else
+ vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+ chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+ break;
+ case HDMI_PACKET_TYPE_AUI:
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+ break;
+ default:
+ break;
+ }
+}
+
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
raw_edid->width_cm, raw_edid->height_cm);
+ kfree(raw_edid);
} else {
return -ENODEV;
}
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
+ struct hdmi_infoframe infoframe;
+
/* disable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
+ infoframe.type = HDMI_PACKET_TYPE_AVI;
+ infoframe.ver = HDMI_AVI_VERSION;
+ infoframe.len = HDMI_AVI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
+ infoframe.type = HDMI_PACKET_TYPE_AUI;
+ infoframe.ver = HDMI_AUI_VERSION;
+ infoframe.len = HDMI_AUI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
/* enable AVI packet every vsync, fixes purple line problem */
- hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
}
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
mdelay(10);
}
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+ HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+ HDMI_PHY_POWER_OFF_EN);
+}
+
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
index = hdmi_v14_conf_index(m);
if (index >= 0) {
+ struct drm_mode_object base;
+ struct list_head head;
+
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
+
+ /* preserve display mode header while copying. */
+ head = adjusted_mode->head;
+ base = adjusted_mode->base;
memcpy(adjusted_mode, m, sizeof(*m));
+ adjusted_mode->head = head;
+ adjusted_mode->base = base;
break;
}
}
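Editor's note: the memcpy() above would otherwise clobber two members that do not belong to the mode data itself, the list linkage and the DRM object header embedded in struct drm_display_mode. A pared-down sketch of the save/restore idiom, with hypothetical names:

    #include <linux/list.h>
    #include <linux/string.h>

    struct item {
            struct list_head head;          /* owned by the containing list */
            int payload_a, payload_b;
    };

    static void copy_payload(struct item *dst, const struct item *src)
    {
            struct list_head head = dst->head;      /* save linkage */

            memcpy(dst, src, sizeof(*src));         /* copies everything... */
            dst->head = head;                       /* ...restore linkage */
    }
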
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
mutex_unlock(&hdata->hdmi_mutex);
- pm_runtime_get_sync(hdata->dev);
-
regulator_bulk_enable(res->regul_count, res->regul_bulk);
clk_enable(res->hdmiphy);
clk_enable(res->hdmi);
clk_enable(res->sclk_hdmi);
+
+ hdmiphy_poweron(hdata);
}
static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
* its reset state seems to meet the condition.
*/
hdmiphy_conf_reset(hdata);
+ hdmiphy_poweroff(hdata);
clk_disable(res->sclk_hdmi);
clk_disable(res->hdmi);
clk_disable(res->hdmiphy);
regulator_bulk_disable(res->regul_count, res->regul_bulk);
- pm_runtime_put_sync(hdata->dev);
-
mutex_lock(&hdata->hdmi_mutex);
hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
switch (mode) {
case DRM_MODE_DPMS_ON:
- hdmi_poweron(hdata);
+ if (pm_runtime_suspended(hdata->dev))
+ pm_runtime_get_sync(hdata->dev);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- hdmi_poweroff(hdata);
+ if (!pm_runtime_suspended(hdata->dev))
+ pm_runtime_put_sync(hdata->dev);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
memset(res, 0, sizeof(*res));
/* get clocks, power */
- res->hdmi = clk_get(dev, "hdmi");
+ res->hdmi = devm_clk_get(dev, "hdmi");
if (IS_ERR_OR_NULL(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
goto fail;
}
- res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
goto fail;
}
- res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
if (IS_ERR_OR_NULL(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
goto fail;
}
- res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
- res->hdmiphy = clk_get(dev, "hdmiphy");
+ res->hdmiphy = devm_clk_get(dev, "hdmiphy");
if (IS_ERR_OR_NULL(res->hdmiphy)) {
DRM_ERROR("failed to get clock 'hdmiphy'\n");
goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
- res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
}
- ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
if (ret) {
DRM_ERROR("failed to get regulators\n");
goto fail;
@@ -2217,28 +2373,6 @@ fail:
return -ENODEV;
}
-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- regulator_bulk_free(res->regul_count, res->regul_bulk);
- /* kfree is NULL-safe */
- kfree(res->regul_bulk);
- if (!IS_ERR_OR_NULL(res->hdmiphy))
- clk_put(res->hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
- clk_put(res->sclk_hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_pixel))
- clk_put(res->sclk_pixel);
- if (!IS_ERR_OR_NULL(res->sclk_hdmi))
- clk_put(res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(res->hdmi))
- clk_put(res->hdmi);
- memset(res, 0, sizeof(*res));
-
- return 0;
-}
-
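Editor's note: hdmi_resources_cleanup() can be deleted wholesale because every resource it released is now acquired through a devm_* variant; the driver core releases device-managed clocks, regulators, and allocations automatically when probe fails or the device is unbound. A minimal sketch of the pattern, assuming a clock named "hdmi":

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_get_clock(struct device *dev)
    {
            struct clk *c = devm_clk_get(dev, "hdmi");

            if (IS_ERR(c))
                    return PTR_ERR(c);
            /* use c; no clk_put() or error-unwind label is needed */
            return 0;
    }
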
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
}
};
+#ifdef CONFIG_OF
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
+#endif
static int __devinit hdmi_probe(struct platform_device *pdev)
{
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(of_match_ptr(hdmi_match_types),
pdev->dev.of_node);
+ if (match == NULL)
+ return -ENODEV;
hdata->type = (enum hdmi_type)match->data;
} else {
hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
- ret = -EINVAL;
DRM_ERROR("hdmi_resources_init failed\n");
- goto err_data;
+ return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("failed to find registers\n");
- ret = -ENOENT;
- goto err_resource;
+ return -ENOENT;
}
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
DRM_ERROR("failed to map registers\n");
- ret = -ENXIO;
- goto err_resource;
+ return -ENXIO;
}
- ret = gpio_request(hdata->hpd_gpio, "HPD");
+ ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
- goto err_resource;
+ return ret;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
- ret = -ENOENT;
- goto err_gpio;
+ return -ENOENT;
}
hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
-err_gpio:
- gpio_free(hdata->hpd_gpio);
-err_resource:
- hdmi_resources_cleanup(hdata);
-err_data:
return ret;
}
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
free_irq(hdata->internal_irq, hdata);
free_irq(hdata->external_irq, hdata);
- gpio_free(hdata->hpd_gpio);
-
- hdmi_resources_cleanup(hdata);
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
disable_irq(hdata->internal_irq);
disable_irq(hdata->external_irq);
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
hdmi_poweroff(hdata);
return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
enable_irq(hdata->external_irq);
enable_irq(hdata->internal_irq);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ hdmi_poweron(hdata);
+
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweroff(hdata);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweron(hdata);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+ SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
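Editor's note: SIMPLE_DEV_PM_OPS() only populates the system-sleep callbacks, so adding runtime-PM handlers requires open-coding the dev_pm_ops as above. Roughly what the two helper macros contribute (a paraphrase, not the exact kernel expansion; the fields are compiled out when CONFIG_PM_SLEEP / CONFIG_PM_RUNTIME are disabled):

    static const struct dev_pm_ops hdmi_pm_ops_spelled_out = {
            .suspend         = hdmi_suspend,         /* system sleep */
            .resume          = hdmi_resume,
            .runtime_suspend = hdmi_runtime_suspend, /* refcount-driven */
            .runtime_resume  = hdmi_runtime_resume,
    };
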
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
- .of_match_table = hdmi_match_types,
+ .of_match_table = of_match_ptr(hdmi_match_types),
},
};
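Editor's note: of_match_ptr() is what lets the match table be compiled out; it evaluates to the table pointer when CONFIG_OF is set and to NULL otherwise, which pairs with the new #ifdef CONFIG_OF guard around hdmi_match_types. Paraphrased from <linux/of.h>:

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr)      (_ptr)
    #else
    #define of_match_ptr(_ptr)      NULL
    #endif
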
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bb..6206056f4a3 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
- .of_match_table = hdmiphy_match_types,
+ .of_match_table = of_match_ptr(hdmiphy_match_types),
},
.id_table = hdmiphy_id,
.probe = hdmiphy_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8..21db89530fc 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,14 +36,13 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
dma_addr_t dma_addr;
- void __iomem *vaddr;
dma_addr_t chroma_dma_addr;
- void __iomem *chroma_vaddr;
uint32_t pixel_format;
unsigned int bpp;
unsigned int crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
+ bool enabled;
+ bool resume;
};
struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
struct mixer_context {
struct device *dev;
+ struct drm_device *drm_dev;
int pipe;
bool interlace;
bool powered;
@@ -90,6 +92,9 @@ struct mixer_context {
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
+ void *parent_ctx;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
};
struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
spin_unlock_irqrestore(&res->reg_slock, flags);
}
-static void mixer_poweron(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- mutex_lock(&ctx->mixer_mutex);
- if (ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
- return;
- }
- ctx->powered = true;
- mutex_unlock(&ctx->mixer_mutex);
-
- pm_runtime_get_sync(ctx->dev);
-
- clk_enable(res->mixer);
- if (ctx->vp_enabled) {
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
- }
-
- mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
- mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
+static int mixer_iommu_on(void *ctx, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct mixer_context *mdata = ctx;
+ struct drm_device *drm_dev;
- mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered)
- goto out;
- mutex_unlock(&ctx->mixer_mutex);
+ drm_hdmi_ctx = mdata->parent_ctx;
+ drm_dev = drm_hdmi_ctx->drm_dev;
- ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+ if (is_drm_iommu_supported(drm_dev)) {
+ if (enable)
+ return drm_iommu_attach_device(drm_dev, mdata->dev);
- clk_disable(res->mixer);
- if (ctx->vp_enabled) {
- clk_disable(res->vp);
- clk_disable(res->sclk_mixer);
+ drm_iommu_detach_device(drm_dev, mdata->dev);
}
-
- pm_runtime_put_sync(ctx->dev);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
-
-out:
- mutex_unlock(&ctx->mixer_mutex);
+ return 0;
}
static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_dpms(void *ctx, int mode)
-{
- struct mixer_context *mixer_ctx = ctx;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- mixer_poweron(mixer_ctx);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- mixer_poweroff(mixer_ctx);
- break;
- default:
- DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
- break;
- }
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
- struct mixer_context *mixer_ctx = ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
- int ret;
-
- ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
- MXR_INT_STATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
win_data = &mixer_ctx->win_data[win];
win_data->dma_addr = overlay->dma_addr[0];
- win_data->vaddr = overlay->vaddr[0];
win_data->chroma_dma_addr = overlay->dma_addr[1];
- win_data->chroma_vaddr = overlay->vaddr[1];
win_data->pixel_format = overlay->pixel_format;
win_data->bpp = overlay->bpp;
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
+
+ mixer_ctx->win_data[win].enabled = true;
}
static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ mixer_ctx->win_data[win].resume = false;
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ mixer_ctx->win_data[win].enabled = false;
+}
+
+static void mixer_wait_for_vblank(void *ctx)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
+ atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+ /*
+ * Wait for the MIXER to signal a VSYNC interrupt, or time out
+ * after 50ms (i.e. a 20Hz refresh rate).
+ */
+ if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+ !atomic_read(&mixer_ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
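Editor's note: this replaces the old register-polling wait_for() loop with a sleeping wait: the caller arms wait_vsync_event and sleeps on the queue, and the interrupt handler (see the mixer_irq_handler hunk below) clears the flag and wakes it. A self-contained sketch of the handshake, with hypothetical names:

    #include <linux/wait.h>
    #include <linux/atomic.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static DECLARE_WAIT_QUEUE_HEAD(vsync_wq);
    static atomic_t vsync_pending = ATOMIC_INIT(0);

    static void wait_one_vblank(void)
    {
            atomic_set(&vsync_pending, 1);
            /* sleep until the ISR clears the flag, or give up after 50ms */
            if (!wait_event_timeout(vsync_wq, !atomic_read(&vsync_pending),
                                    msecs_to_jiffies(50)))
                    pr_debug("vblank wait timed out\n");
    }

    static void signal_vblank_from_isr(void)
    {
            if (atomic_read(&vsync_pending)) {
                    atomic_set(&vsync_pending, 0);
                    wake_up(&vsync_wq);
            }
    }
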
+static void mixer_window_suspend(struct mixer_context *ctx)
+{
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ mixer_win_disable(ctx, i);
+ }
+ mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ clk_enable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+ }
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+
+ mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered)
+ goto out;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ mixer_window_suspend(ctx);
+
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+ clk_disable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+ }
+
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+
+out:
+ mutex_unlock(&ctx->mixer_mutex);
+}
+
+static void mixer_dpms(void *ctx, int mode)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_get_sync(mixer_ctx->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (!pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_put_sync(mixer_ctx->dev);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
}
static struct exynos_mixer_ops mixer_ops = {
/* manager */
+ .iommu_on = mixer_iommu_on,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
+ .wait_for_vblank = mixer_wait_for_vblank,
.dpms = mixer_dpms,
/* overlay */
- .wait_for_vblank = mixer_wait_for_vblank,
.win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ drm_vblank_put(drm_dev, crtc);
}
- if (is_checked)
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
}
out:
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
spin_lock_init(&mixer_res->reg_slock);
- mixer_res->mixer = clk_get(dev, "mixer");
+ mixer_res->mixer = devm_clk_get(dev, "mixer");
if (IS_ERR_OR_NULL(mixer_res->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
- goto fail;
+ return ret;
}
mixer_res->irq = res->start;
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
- clk_put(mixer_res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(mixer_res->mixer))
- clk_put(mixer_res->mixer);
- return ret;
}
static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
- int ret;
- mixer_res->vp = clk_get(dev, "vp");
+ mixer_res->vp = devm_clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+ mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
- clk_put(mixer_res->sclk_dac);
- if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
- clk_put(mixer_res->sclk_mixer);
- if (!IS_ERR_OR_NULL(mixer_res->vp))
- clk_put(mixer_res->vp);
- return ret;
}
static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
}
ctx->dev = &pdev->dev;
+ ctx->parent_ctx = (void *)drm_hdmi_ctx;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
mixer_poweroff(ctx);
return 0;
}
+
+static int mixer_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
#endif
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_poweroff(ctx);
+
+ return 0;
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mixer_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+ SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
struct platform_driver mixer_driver = {
.driver = {
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 00000000000..b4f9ca1fd85
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+ */
+/* Input source format */
+#define EXYNOS_CISRCFMT (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2 (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1 (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2 (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3 (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4 (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1 (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2 (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3 (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4 (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1 (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2 (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3 (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4 (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL (0x58)
+/* Target area */
+#define EXYNOS_CITAREA (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2 (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0 (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0 (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0 (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1 (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1 (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1 (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5 (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6 (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7 (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8 (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9 (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10 (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11 (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12 (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13 (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14 (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15 (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16 (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17 (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18 (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19 (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20 (0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21 (0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22 (0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23 (0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24 (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25 (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26 (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27 (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28 (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29 (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30 (0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31 (0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32 (0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5 (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6 (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7 (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8 (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9 (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10 (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11 (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12 (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13 (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14 (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15 (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16 (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17 (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18 (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19 (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20 (0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21 (0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22 (0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23 (0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24 (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25 (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26 (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27 (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28 (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29 (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30 (0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31 (0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32 (0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5 (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6 (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7 (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8 (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9 (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10 (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11 (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12 (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13 (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14 (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15 (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16 (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17 (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18 (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19 (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20 (0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21 (0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22 (0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23 (0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24 (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25 (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26 (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27 (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28 (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29 (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30 (0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31 (0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32 (0x34c)
+
+/*
+ * Macro part
+ */
+/* frame start addresses 1-4 and 5-32 */
+/* number of default ping-pong memory buffers */
+#define DEF_PP 4
+#define EXYNOS_CIOYSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOYSA1 + (__x) * 4) : \
+ (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
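Editor's note: the Y/Cb/Cr start-address registers are split into two banks (buffers 1-4 from 0x18 onward, 5-32 from 0x200 onward), so the macros above select the bank from a zero-based buffer index. Worked examples for the Y plane:

    EXYNOS_CIOYSA(0)  == EXYNOS_CIOYSA1 + 0 * 4  == 0x18   /* first bank  */
    EXYNOS_CIOYSA(3)  == EXYNOS_CIOYSA1 + 3 * 4  == 0x24
    EXYNOS_CIOYSA(4)  == EXYNOS_CIOYSA5 + 0 * 4  == 0x200  /* second bank */
    EXYNOS_CIOYSA(31) == EXYNOS_CIOYSA5 + 27 * 4 == 0x26c
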
+/* number of default input ping-pong memory buffers */
+#define DEF_IPP 1
+#define EXYNOS_CIIYSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x) (((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
+
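Editor's note: CITRGFMT holds only 13 bits of the target width/height; the fourteenth bit is carried in CIEXTEN. For example, a target width of 0x2000 (8192) splits as:

    EXYNOS_CITRGFMT_TARGETHSIZE(0x2000) == (0x2000 & 0x1fff) << 16 == 0
    EXYNOS_CIEXTEN_TARGETH_EXT(0x2000)  == ((0x2000 & 0x2000) >> 13) << 26
                                        == 1 << 26

i.e. the low 13 bits land in CITRGFMT and bit 13 lands in bit 26 of CIEXTEN.
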
+/*
+ * Bit definition part
+ */
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK (0xfff << 0)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
+#define EXYNOS_MSCTRL_ENVID (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
+#define EXYNOS_CLKSRC_SCLK (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT 23
+
+#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 00000000000..9ad592707aa
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
+#define GSC_ENABLE_NORM_MODE (0 << 7)
+#define GSC_ENABLE_IPC_MODE (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE (1 << 3)
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
+#define GSC_IRQ_OR_MASK (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON 0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
+#define GSC_IN_RB_SWAP_MASK (1 << 19)
+#define GSC_IN_RB_SWAP (1 << 19)
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (2 << 14)
+#define GSC_IN_RGB_SD_WIDE (1 << 14)
+#define GSC_IN_RGB_SD_NARROW (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_C_16x16 (1 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_LOCAL_CAM3 (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
+#define GSC_IN_LOCAL_CAM1 (1 << 1)
+#define GSC_IN_LOCAL_CAM0 (0 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_LOCAL (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
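Editor's note: these helpers compose a single register value from the two half-word fields. A hypothetical usage sketch (gsc_write() stands in for whatever MMIO accessor the G-Scaler driver uses; it is not defined in this patch):

    /* program a 1920x1080 source image: (1080 << 16) | 1920 */
    u32 val = GSC_SRCIMG_HEIGHT(1080) | GSC_SRCIMG_WIDTH(1920);
    gsc_write(val, GSC_SRCIMG_SIZE);
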
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE 0x1C
+#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK (1 << 12)
+#define GSC_OUT_RB_SWAP (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_C (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_C_16x16 (1 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK (7 << 16)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK (7 << 0)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO 0x2C
+#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE 0x3C
+#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE 0x48
+#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
+
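The CURR_ADDR_INDEX bits report which of the ping-pong buffers the engine is currently fetching. A speculative read-back sketch (which register actually exposes the index is an assumption here, not stated by the patch):

	u32 status = readl(regs + GSC_IN_BASE_ADDR_Y_MASK);	/* assumed source of the index */
	int cur = GSC_IN_CURR_GET_INDEX(status & GSC_IN_CURR_ADDR_INDEX);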
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
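The coefficient macros are pure offset arithmetic; the hardware meaning of the three indices is not defined by this patch. A worked computation:

	/*
	 * GSC_HCOEF(2, 1, 0) = 0x300 + 2 * 0x4 + 1 * 0x30 + 0 * 0x300 = 0x338
	 * GSC_VCOEF(2, 1, 0) = 0x200 + 2 * 0x4 + 1 * 0x30 + 0 * 0x300 = 0x238
	 */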
+/* G-Scaler BUS control */
+#define GSC_BUSCON 0xA78
+#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
+#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION 0xA7C
+#define GSC_VPOS_F(x) ((x) << 0)
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT 0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT 0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x) ((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718..ef1b3eb3ba6 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN (1 << 0)
+
/* Video related registers */
#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * ((n) - 1))
#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * ((n) - 1))
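Rebasing on (n) - 1 lets callers pass the 1-based data-byte numbers used in the infoframe layout, so the first byte still maps to the first register:

	/* HDMI_AVI_BYTE(1) -> HDMI_CORE_BASE(0x0720),
	 * HDMI_AVI_BYTE(2) -> HDMI_CORE_BASE(0x0724), and so on. */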
#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN (0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
+
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 00000000000..a09ac6e180d
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG 0x00
+#define ROT_CONFIG_IRQ (3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL 0x10
+#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
+#define ROT_CONTROL_FMT_RGB888 (6 << 8)
+#define ROT_CONTROL_FMT_MASK (7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
+#define ROT_CONTROL_FLIP_MASK (3 << 6)
+#define ROT_CONTROL_ROT_90 (1 << 4)
+#define ROT_CONTROL_ROT_180 (2 << 4)
+#define ROT_CONTROL_ROT_270 (3 << 4)
+#define ROT_CONTROL_ROT_MASK (3 << 4)
+#define ROT_CONTROL_START (1 << 0)
+
+/* Status */
+#define ROT_STATUS 0x20
+#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
+#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE 1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
+
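Decoding the status word then reduces to extracting the two IRQ bits and comparing against the _VAL_ constants. A hypothetical sketch (the "regs" and "dev" variables are assumed driver context):

	u32 status = readl(regs + ROT_STATUS);

	switch (ROT_STATUS_IRQ(status)) {
	case ROT_STATUS_IRQ_VAL_COMPLETE:
		dev_dbg(dev, "rotation complete\n");
		break;
	case ROT_STATUS_IRQ_VAL_ILLEGAL:
		dev_err(dev, "illegal register access\n");
		break;
	}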
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE 0x3c
+#define ROT_DST_BUF_SIZE 0x5c
+#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
+
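The SET/GET helpers round-trip, e.g. for a 1280x720 destination buffer:

	u32 size = ROT_SET_BUF_SIZE_H(720) | ROT_SET_BUF_SIZE_W(1280);

	/* ROT_GET_BUF_SIZE_H(size) == 720, ROT_GET_BUF_SIZE_W(size) == 1280 */
	writel(size, regs + ROT_DST_BUF_SIZE);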
+/* Crop Position */
+#define ROT_SRC_CROP_POS 0x40
+#define ROT_DST_CROP_POS 0x60
+#define ROT_CROP_POS_Y(x) ((x) << 16)
+#define ROT_CROP_POS_X(x) ((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE 0x44
+#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask) ((max) & (mask))
+
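These helpers take the mask pre-inverted; a worked example at 16-pixel granularity (align = 4, mask = ~0xf):

	/*
	 * ROT_ALIGN(100, 4, ~0xf) = (100 + 8)   & ~0xf = 96  (nearest)
	 * ROT_MIN(100, ~0xf)      = (100 + 0xf) & ~0xf = 112 (round up)
	 * ROT_MAX(100, ~0xf)      = 100         & ~0xf = 96  (round down)
	 */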
+#endif /* EXYNOS_REGS_ROTATOR_H */
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1ceca3d13b6..23e14e93991 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
/* Cedarview */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index e3a3978cf3