author    Linus Torvalds <torvalds@linux-foundation.org> 2018-02-01 17:48:47 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2018-02-01 17:48:47 -0800
commit    4bf772b14675411a69b3c807f73006de0fe4b649 (patch)
tree      b841e3ba0e3429695589cb0ab73871fa12f42c38
parent    3879ae653a3e98380fe2daf653338830b7ca0097 (diff)
parent    24b8ef699e8221d2b7f813adaab13eec053e1507 (diff)
Merge tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This seems to have been a comparatively quieter merge window, I assume
  due to holidays etc. The "biggest" change is AMD header cleanups, which
  merge/remove a bunch of them. The AMD gpu scheduler is now being made
  generic with the etnaviv driver wanting to reuse the code, hopefully
  other drivers can go in the same direction.

  Otherwise it's the usual lots of stuff in i915/amdgpu, not so much
  stuff elsewhere.

  Core:
   - Add .last_close and .output_poll_changed helpers to reduce driver
     footprints (see the sketch after the shortlog)
   - Fix plane clipping
   - Improved debug printing support
   - Add panel orientation property
   - Update edid derived properties at edid setting
   - Reduction in fbdev driver footprint
   - Move amdgpu scheduler into core for other drivers to use.

  i915:
   - Selftest and IGT improvements
   - Fast boot prep work on IPS, pipe config
   - HW workarounds for Cannonlake, Geminilake
   - Cannonlake clock and HDMI2.0 fixes
   - GPU cache invalidation and context switch improvements
   - Display planes cleanup
   - New PMU interface for perf queries
   - New firmware support for KBL/SKL
   - Geminilake HW workaround for performance
   - Coffeelake stolen memory improvements
   - GPU reset robustness work
   - Cannonlake horizontal plane flipping
   - GVT work

  amdgpu/radeon:
   - RV and Vega header file cleanups (lots of lines gone!)
   - TTM operation context support
   - 48-bit GPUVM support for Vega/RV
   - ECC support for Vega
   - Resizeable BAR support
   - Multi-display sync support
   - Enable swapout for reserved BOs during allocation
   - S3 fixes on Raven
   - GPU reset cleanup and fixes
   - 2+1 level GPU page table

  amdkfd:
   - GFX7/8 SDMA user queues support
   - Hardware scheduling for multiple processes
   - dGPU prep work

  rcar:
   - Added R8A7743/5 support
   - System suspend/resume support

  sun4i:
   - Multi-plane support for YUV formats
   - A83T and LVDS support

  msm:
   - Devfreq support for GPU

  tegra:
   - Prep work for adding Tegra186 support
   - Tegra186 HDMI support
   - HDMI2.0 and zpos support by using generic helpers

  tilcdc:
   - Misc fixes

  omapdrm:
   - Support memory bandwidth limits
   - DSI command mode panel cleanups
   - DMM error handling

  exynos:
   - drop the old IPP subdriver.

  etnaviv:
   - Occlusion query fixes
   - Job handling fixes
   - Prep work for hooking in gpu scheduler

  armada:
   - Move closer to atomic modesetting
   - Allow disabling primary plane if overlay is full screen

  imx:
   - Format modifier support
   - Add tile prefetch to PRE
   - Runtime PM support for PRG

  ast:
   - fix LUT loading"

* tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux: (1471 commits)
  drm/ast: Load lut in crtc_commit
  drm: Check for lessee in DROP_MASTER ioctl
  drm: fix gpu scheduler link order
  drm/amd/display: Demote error print to debug print when ATOM impl missing
  dma-buf: fix reservation_object_wait_timeout_rcu once more v2
  drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)
  drm/amd/amdgpu: Add Polaris version check
  drm/amdgpu: Reenable manual GPU reset from sysfs
  drm/amdgpu: disable MMHUB power gating on raven
  drm/ttm: Don't unreserve swapped BOs that were previously reserved
  drm/ttm: Don't add swapped BOs to swap-LRU list
  drm/amdgpu: only check for ECC on Vega10
  drm/amd/powerplay: Fix smu_table_entry.handle type
  drm/ttm: add VADDR_FLAG_UPDATED_COUNT to correctly update dma_page global count
  drm: Fix PANEL_ORIENTATION_QUIRKS breaking the Kconfig DRM menuconfig
  drm/radeon: fill in rb backend map on evergreen/ni.
  drm/amdgpu/gfx9: fix ngg enablement to clear gds reserved memory (v2)
  drm/ttm: only free pages rather than update global memory count together
  drm/amdgpu: fix CPU based VM updates
  drm/amdgpu: fix typo in amdgpu_vce_validate_bo
  ...
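The first "Core" item refers to the new drm_fb_helper_lastclose() and
drm_fb_helper_output_poll_changed() helpers (the drm_driver member is
spelled .lastclose), and the fourth to the new "panel orientation"
connector property. Below is a minimal sketch, not taken from this pull,
of how a KMS driver might wire both up; all example_* names are
hypothetical and the surrounding driver plumbing is elided.

	/*
	 * Sketch only: assumes a driver using atomic helpers, GEM
	 * framebuffers, and fbdev emulation via drm_fb_helper.
	 */
	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_connector.h>
	#include <drm/drm_drv.h>
	#include <drm/drm_fb_helper.h>
	#include <drm/drm_gem_framebuffer_helper.h>

	static const struct drm_mode_config_funcs example_mode_config_funcs = {
		.fb_create = drm_gem_fb_create,
		/* new helper: kicks fbdev emulation when outputs change */
		.output_poll_changed = drm_fb_helper_output_poll_changed,
		.atomic_check = drm_atomic_helper_check,
		.atomic_commit = drm_atomic_helper_commit,
	};

	static struct drm_driver example_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		/* new helper: restores the fbdev console on last close */
		.lastclose = drm_fb_helper_lastclose,
		/* ... fops, name, ioctls etc. elided ... */
	};

	static int example_connector_setup(struct drm_connector *connector,
					   int width, int height)
	{
		/*
		 * Attach the "panel orientation" property; the value comes
		 * from connector->display_info.panel_orientation, which the
		 * DMI quirk table (drm_panel_orientation_quirks.c, moved
		 * from fbcon in this cycle) or firmware parsing fills in.
		 */
		return drm_connector_init_panel_orientation_property(connector,
								     width,
								     height);
	}

The generic GPU scheduler mentioned above is what the new
drivers/gpu/drm/scheduler/ entries in the diffstat below correspond to,
with etnaviv as the first intended non-AMD user.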
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt4
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt4
-rw-r--r--Documentation/devicetree/bindings/display/ilitek,ili9225.txt25
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt49
-rw-r--r--Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-common.txt10
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-lvds.txt1
-rw-r--r--Documentation/devicetree/bindings/display/panel/simple-panel.txt2
-rw-r--r--Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt29
-rw-r--r--Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt (renamed from Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt)4
-rw-r--r--Documentation/devicetree/bindings/display/renesas,du.txt30
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt1
-rw-r--r--Documentation/devicetree/bindings/display/sitronix,st7735r.txt35
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-ltdc.txt6
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt11
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt14
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,dra7-dss.txt5
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,omap2-dss.txt4
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,omap3-dss.txt4
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,omap4-dss.txt4
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,omap5-dss.txt4
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt3
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst39
-rw-r--r--Documentation/gpu/drm-kms.rst14
-rw-r--r--Documentation/gpu/i915.rst4
-rw-r--r--Documentation/gpu/todo.rst41
-rw-r--r--MAINTAINERS29
-rw-r--r--arch/x86/include/asm/iosf_mbi.h25
-rw-r--r--arch/x86/kernel/early-quirks.c87
-rw-r--r--arch/x86/platform/intel/iosf_mbi.c19
-rw-r--r--drivers/base/Kconfig1
-rw-r--r--drivers/char/agp/intel-gtt.c16
-rw-r--r--drivers/dma-buf/dma-buf.c9
-rw-r--r--drivers/dma-buf/dma-fence-array.c14
-rw-r--r--drivers/dma-buf/reservation.c66
-rw-r--r--drivers/gpu/drm/Kconfig10
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h227
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c188
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c266
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c792
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h (renamed from drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c)73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c1900
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c459
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c287
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c134
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c1024
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_i2c.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c212
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c221
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c329
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c369
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c159
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c304
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c156
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c278
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c10
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/amd/amdgpu/vce_v4_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm1384
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c46
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1267
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h42
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c75
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c124
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c48
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c167
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c59
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h83
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c267
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c78
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c1061
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h33
-rw-r--r--drivers/gpu/drm/amd/display/TODO3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c303
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h12
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c148
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c625
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c145
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c183
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h594
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h207
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h293
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c228
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c278
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c373
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c265
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c294
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c114
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h530
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c221
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h214
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c116
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h569
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2068
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c529
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h90
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c)439
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h)150
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c133
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h157
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h33
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h16
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c84
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h172
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_offset.h453
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_sh_mask.h2045
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_default.h)7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_offset.h)14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_sh_mask.h)45
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_offset.h)14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_offset.h209
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_sh_mask.h601
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_offset.h375
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_sh_mask.h1463
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_default.h7988
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_default.h4005
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_sh_mask.h31191
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_default.h1028
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_sh_mask.h1658
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_default.h202
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_default.h286
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_offset.h547
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_sh_mask.h1852
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_default.h282
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_offset.h539
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_sh_mask.h1810
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_default.h31
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_offset.h52
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_sh_mask.h36
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_default.h241
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_offset.h453
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_sh_mask.h2045
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_default.h9868
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_default.h117
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_offset.h209
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_sh_mask.h601
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_default.h342
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_offset.h375
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_sh_mask.h1463
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_default.h1271
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_default.h176
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_default.h286
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_offset.h547
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_sh_mask.h1852
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_default.h282
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_offset.h539
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_sh_mask.h1810
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_default.h100
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_default.h127
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h144
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h52
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h294
-rw-r--r--drivers/gpu/drm/amd/include/soc15ip.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/soc15ip.h)0
-rw-r--r--drivers/gpu/drm/amd/include/vega10_enum.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/vega10_enum.h)0
-rw-r--r--drivers/gpu/drm/amd/include/vi_structs.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c170
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c33
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c182
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h15
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c61
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c81
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h23
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h275
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h35
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c13
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c28
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c15
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c6
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h185
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c8
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c63
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h2
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c3
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c395
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h24
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c8
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c11
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c282
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h24
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c15
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c26
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h2
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c7
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c2
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c53
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c9
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c12
-rw-r--r--drivers/gpu/drm/drm_atomic.c70
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c112
-rw-r--r--drivers/gpu/drm/drm_auth.c6
-rw-r--r--drivers/gpu/drm/drm_blend.c8
-rw-r--r--drivers/gpu/drm/drm_connector.c73
-rw-r--r--drivers/gpu/drm/drm_debugfs.c8
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c1
-rw-r--r--drivers/gpu/drm/drm_drv.c47
-rw-r--r--drivers/gpu/drm/drm_edid.c110
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c156
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c352
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c89
-rw-r--r--drivers/gpu/drm/drm_gem.c21
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c53
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c6
-rw-r--r--drivers/gpu/drm/drm_internal.h7
-rw-r--r--drivers/gpu/drm/drm_mm.c2
-rw-r--r--drivers/gpu/drm/drm_mode_config.c3
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c76
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c (renamed from drivers/video/fbdev/core/fbcon_dmi_quirks.c)104
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c111
-rw-r--r--drivers/gpu/drm/drm_prime.c8
-rw-r--r--drivers/gpu/drm/drm_print.c54
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c3
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c9
-rw-r--r--drivers/gpu/drm/drm_syncobj.c56
-rw-r--r--drivers/gpu/drm/drm_vblank.c12
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c15
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig9
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c40
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c29
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h18
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c22
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h14
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c23
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c197
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h22
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c199
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c215
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c14
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.h2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig11
-rw-r--r--drivers/gpu/drm/exynos/Makefile1
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c1806
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h252
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c3
-rw-r--r--drivers/gpu/drm/exynos/regs-decon5433.h (renamed from include/video/exynos5433_decon.h)6
-rw-r--r--drivers/gpu/drm/exynos/regs-decon7.h (renamed from include/video/exynos7_decon.h)8
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c25
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c9
-rw-r--r--drivers/gpu/drm/gma500/mmu.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c17
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c11
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c1
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug29
-rw-r--r--drivers/gpu/drm/i915/Makefile32
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile3
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c264
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h24
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c212
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c80
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c537
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.h67
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c498
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c514
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h169
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c26
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c298
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h45
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c153
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h127
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c842
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c301
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c89
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c412
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h (renamed from drivers/gpu/drm/i915/gvt/render.h)9
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h79
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c360
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c405
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c670
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h35
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h15
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c73
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c20
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c438
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c182
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h643
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c538
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c368
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c85
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c192
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c139
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c68
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c138
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c323
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c195
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c14
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c7
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bdw.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bxt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt3.c109
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt3.h34
-rw-r--r--drivers/gpu/drm/i915/i915_oa_chv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cnl.c121
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cnl.h34
-rw-r--r--drivers/gpu/drm/i915/i915_oa_glk.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt2.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt3.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt2.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt3.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt4.c4
-rw-r--r--drivers/gpu/drm/i915/i915_params.c59
-rw-r--r--drivers/gpu/drm/i915/i915_params.h14
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c153
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c865
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h111
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h93
-rw-r--r--drivers/gpu/drm/i915/i915_selftest.h2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c3
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c33
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h40
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h22
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c40
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h35
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c202
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c24
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c105
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c393
-rw-r--r--drivers/gpu/drm/i915/intel_color.c4
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c46
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c13
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c347
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c200
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h183
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1001
-rw-r--r--drivers/gpu/drm/i915/intel_display.h321
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c594
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c79
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c122
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c107
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h80
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c47
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c475
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c48
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c3
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c152
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h21
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.c1
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c245
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.h2
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h40
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c29
-rw-r--r--drivers/gpu/drm/i915/intel_guc_reg.h (renamed from drivers/gpu/drm/i915/i915_guc_reg.h)14
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c (renamed from drivers/gpu/drm/i915/i915_guc_submission.c)751
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h (renamed from drivers/gpu/drm/i915/i915_guc_submission.h)17
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c7
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c61
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c114
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c83
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h6
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c61
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c283
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h5
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c1
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c6
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.h106
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c4
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c88
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c437
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c747
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h263
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c10
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c13
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c6
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c267
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h23
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c6
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.h7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c172
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h8
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c181
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c33
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c52
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_request.c22
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_syncmap.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c353
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c331
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c41
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c4
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c34
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h1
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c110
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c4
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c13
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c8
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c38
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c140
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c85
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c19
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c18
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c14
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c112
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c28
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c5
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c291
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c13
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c15
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c27
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc_coefs.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c16
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c25
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h38
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c13
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_phy.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_pll.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h37
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c41
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c25
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c45
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h140
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.h33
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h46
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.h39
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c11
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h99
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.h39
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.h37
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c4
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.h4
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h2
-rw-r--r--drivers/gpu/drm/panel/Kconfig8
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c962
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c30
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c62
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h1
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c16
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c63
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c21
-rw-r--r--drivers/gpu/drm/radeon/cik.c114
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1
-rw-r--r--drivers/gpu/drm/radeon/ni.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c38
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c62
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c75
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c64
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c28
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c4
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c30
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h6
-rw-r--r--drivers/gpu/drm/scheduler/Makefile26
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler.c (renamed from drivers/gpu/drm/amd/scheduler/gpu_scheduler.c)396
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c (renamed from drivers/gpu/drm/amd/scheduler/sched_fence.c)122
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c5
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.c2
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.h2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c29
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h3
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c12
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp_lut.h2
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c2
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c40
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.h2
-rw-r--r--drivers/gpu/drm/stm/drv.c41
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c14
-rw-r--r--drivers/gpu/drm/stm/ltdc.c75
-rw-r--r--drivers/gpu/drm/stm/ltdc.h4
-rw-r--r--drivers/gpu/drm/sun4i/Makefile5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_dotclock.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c23
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c177
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.h12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c248
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h33
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c93
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h36
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_layer.c134
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_layer.h36
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c517
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h126
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c349
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.h63
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c172
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.h49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c390
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h51
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c971
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.h58
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/dc.c1914
-rw-r--r--drivers/gpu/drm/tegra/dc.h289
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c120
-rw-r--r--drivers/gpu/drm/tegra/drm.c177
-rw-r--r--drivers/gpu/drm/tegra/drm.h30
-rw-r--r--drivers/gpu/drm/tegra/dsi.c228
-rw-r--r--drivers/gpu/drm/tegra/fb.c50
-rw-r--r--drivers/gpu/drm/tegra/gem.c15
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c504
-rw-r--r--drivers/gpu/drm/tegra/hub.c806
-rw-r--r--drivers/gpu/drm/tegra/hub.h81
-rw-r--r--drivers/gpu/drm/tegra/output.c25
-rw-r--r--drivers/gpu/drm/tegra/plane.c383
-rw-r--r--drivers/gpu/drm/tegra/plane.h70
-rw-r--r--drivers/gpu/drm/tegra/sor.c1117
-rw-r--r--drivers/gpu/drm/tegra/sor.h16
-rw-r--r--drivers/gpu/drm/tegra/vic.c20
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c51
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c28
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h2
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig20
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile2
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c104
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c4
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c469
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c10
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c32
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c3
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c215
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c341
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c23
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c37
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c36
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c99
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c5
-rw-r--r--drivers/gpu/drm/tve200/tve200_drm.h1
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c38
-rw-r--r--drivers/gpu/drm/via/via_drv.h4
-rw-r--r--drivers/gpu/drm/via/via_irq.c27
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c12
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c48
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c1
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c15
-rw-r--r--drivers/gpu/host1x/bus.c11
-rw-r--r--drivers/gpu/host1x/dev.c36
-rw-r--r--drivers/gpu/host1x/dev.h1
-rw-r--r--drivers/gpu/ipu-v3/Kconfig4
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c1
-rw-r--r--drivers/gpu/ipu-v3/ipu-ic.c1
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c29
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c84
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c37
-rw-r--r--drivers/staging/vboxvideo/vbox_ttm.c23
-rw-r--r--drivers/video/fbdev/Kconfig1
-rw-r--r--drivers/video/fbdev/core/Makefile3
-rw-r--r--drivers/video/fbdev/core/fbcon.c22
-rw-r--r--drivers/video/fbdev/core/fbcon.h6
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c1
-rw-r--r--drivers/video/fbdev/efifb.c21
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c12
-rw-r--r--drivers/video/hdmi.c51
-rw-r--r--include/drm/drmP.h197
-rw-r--r--include/drm/drm_atomic.h32
-rw-r--r--include/drm/drm_atomic_helper.h7
-rw-r--r--include/drm/drm_connector.h54
-rw-r--r--include/drm/drm_device.h9
-rw-r--r--include/drm/drm_dp_helper.h1
-rw-r--r--include/drm/drm_drv.h22
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_encoder.h6
-rw-r--r--include/drm/drm_fb_cma_helper.h13
-rw-r--r--include/drm/drm_fb_helper.h57
-rw-r--r--include/drm/drm_framebuffer.h8
-rw-r--r--include/drm/drm_gem_cma_helper.h16
-rw-r--r--include/drm/drm_mm.h2
-rw-r--r--include/drm/drm_mode_config.h27
-rw-r--r--include/drm/drm_modeset_helper.h3
-rw-r--r--include/drm/drm_modeset_helper_vtables.h3
-rw-r--r--include/drm/drm_plane.h14
-rw-r--r--include/drm/drm_plane_helper.h5
-rw-r--r--include/drm/drm_print.h219
-rw-r--r--include/drm/drm_syncobj.h36
-rw-r--r--include/drm/drm_utils.h15
-rw-r--r--include/drm/drm_vma_manager.h2
-rw-r--r--include/drm/gpu_scheduler.h173
-rw-r--r--include/drm/gpu_scheduler_trace.h (renamed from drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h)21
-rw-r--r--include/drm/i915_drm.h3
-rw-r--r--include/drm/i915_pciids.h32
-rw-r--r--include/drm/intel-gtt.h3
-rw-r--r--include/drm/spsc_queue.h122
-rw-r--r--include/drm/tinydrm/mipi-dbi.h4
-rw-r--r--include/drm/tinydrm/tinydrm.h8
-rw-r--r--include/drm/ttm/ttm_bo_api.h169
-rw-r--r--include/drm/ttm/ttm_bo_driver.h157
-rw-r--r--include/drm/ttm/ttm_memory.h75
-rw-r--r--include/drm/ttm/ttm_page_alloc.h11
-rw-r--r--include/linux/dma-fence-array.h3
-rw-r--r--include/linux/dma-fence.h2
-rw-r--r--include/linux/fb.h5
-rw-r--r--include/linux/reservation.h23
-rw-r--r--include/uapi/drm/amdgpu_drm.h12
-rw-r--r--include/uapi/drm/drm_fourcc.h38
-rw-r--r--include/uapi/drm/exynos_drm.h192
-rw-r--r--include/uapi/drm/i915_drm.h77
-rw-r--r--include/uapi/linux/kfd_ioctl.h15
-rw-r--r--include/uapi/linux/vfio.h62
-rw-r--r--include/video/imx-ipu-v3.h2
1020 files changed, 57481 insertions, 96977 deletions
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
index 7f040edc16fe..bf4a18047309 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
@@ -48,6 +48,10 @@ Required properties:
Documentation/devicetree/bindings/reset/reset.txt,
the reset-names should be "hdmitx_apb", "hdmitx", "hdmitx_phy"
+Optional properties:
+- hdmi-supply: Optional phandle to an external 5V regulator to power the HDMI
+ logic, as described in the file ../regulator/regulator.txt
+
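+For illustration, a minimal sketch of how the new hdmi-supply property
+might be wired up in a board device tree; the hdmi_tx node label and the
+hdmi_5v regulator are hypothetical:
+
+	&hdmi_tx {
+		hdmi-supply = <&hdmi_5v>;	/* external 5V HDMI supply */
+	};
+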
Required nodes:
The connections to the HDMI ports are modeled using the OF graph
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
index 00f74bad1e95..057b81335775 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
@@ -64,6 +64,10 @@ Required properties:
- reg-names: should contain the names of the previous memory regions
- interrupts: should contain the VENC Vsync interrupt number
+Optional properties:
+- power-domains: Optional phandle to associated power domain as described in
+ the file ../power/power_domain.txt
+
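+Similarly, the optional power-domains property might look like this in a
+board device tree (the vpu and pwrc_vpu labels are hypothetical):
+
+	&vpu {
+		power-domains = <&pwrc_vpu>;	/* VPU power domain */
+	};
+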
Required nodes:
The connections to the VPU output video ports are modeled using the OF graph
diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9225.txt b/Documentation/devicetree/bindings/display/ilitek,ili9225.txt
new file mode 100644
index 000000000000..a59feb52015b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ilitek,ili9225.txt
@@ -0,0 +1,25 @@
+Ilitek ILI9225 display panels
+
+This binding is for display panels using an Ilitek ILI9225 controller in SPI
+mode.
+
+Required properties:
+- compatible: "vot,v220hf01a-t", "ilitek,ili9225"
+- rs-gpios: Register select signal
+- reset-gpios: Reset pin
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
+
+Example:
+ display@0 {
+ compatible = "vot,v220hf01a-t", "ilitek,ili9225";
+ reg = <0>;
+ spi-max-frequency = <12000000>;
+ rs-gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+ rotation = <270>;
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt
new file mode 100644
index 000000000000..3d5ce6ad6ec7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.txt
@@ -0,0 +1,49 @@
+Ilitek ILI9322 TFT panel driver with SPI control bus
+
+This is a driver for 320x240 TFT panels, accepting a variety of input
+streams that get adapted and scaled to the panel. The panel output has
+960 TFT source driver pins and 240 TFT gate driver pins, VCOM, VCOML and
+VCOMH outputs.
+
+Required properties:
+ - compatible: "dlink,dir-685-panel", "ilitek,ili9322"
+ (full system-specific compatible is always required to look up configuration)
+ - reg: address of the panel on the SPI bus
+
+Optional properties:
+ - vcc-supply: core voltage supply, see regulator/regulator.txt
+ - iovcc-supply: voltage supply for the interface input/output signals,
+ see regulator/regulator.txt
+ - vci-supply: voltage supply for analog parts, see regulator/regulator.txt
+ - reset-gpios: a GPIO spec for the reset pin, see gpio/gpio.txt
+
+ The following optional properties only apply to RGB and YUV input modes and
+ can be omitted for BT.656 input modes:
+
+ - pixelclk-active: see display/panel/display-timing.txt
+ - de-active: see display/panel/display-timing.txt
+ - hsync-active: see display/panel/display-timing.txt
+ - vsync-active: see display/panel/display-timing.txt
+
+The panel must obey the rules for a SPI slave device as specified in
+spi/spi-bus.txt
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in
+media/video-interfaces.txt. This node should describe the panel's video bus.
+
+Example:
+
+panel: display@0 {
+ compatible = "dlink,dir-685-panel", "ilitek,ili9322";
+ reg = <0>;
+ vcc-supply = <&vdisp>;
+ iovcc-supply = <&vdisp>;
+ vci-supply = <&vdisp>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt
new file mode 100644
index 000000000000..7d8f6eeef6d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt
@@ -0,0 +1,7 @@
+Mitsubishi "AA070MC01 7.0" WVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "mitsubishi,aa070mc01-ca1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt
index ec52c472c845..557fa765adcb 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.txt
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt
@@ -78,6 +78,16 @@ used for panels that implement compatible control signals.
while active. Active high reset signals can be supported by inverting the
GPIO specifier polarity flag.
+Power
+-----
+
+- power-supply: display panels require power to be supplied. While some
+ panels need more than one power supply, with panel-specific constraints
+ governing the order and timing of those supplies, in many cases a single
+ power supply is sufficient, either because the panel has a single power
+ rail or because all its power rails can be driven by the same supply. In
+ that case the power-supply property specifies the supply powering the
+ panel as a phandle to a regulator.
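+
+A minimal sketch of the single-supply case described above; the
+compatible string and regulator label are hypothetical placeholders:
+
+	panel {
+		compatible = "vendor,model";
+		power-supply = <&panel_vdd>;
+	};
+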
Backlight
---------
diff --git a/Documentation/devicetree/bindings/display/panel/panel-lvds.txt b/Documentation/devicetree/bindings/display/panel/panel-lvds.txt
index b938269f841e..250850a2150b 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-lvds.txt
+++ b/Documentation/devicetree/bindings/display/panel/panel-lvds.txt
@@ -32,6 +32,7 @@ Optional properties:
- label: See panel-common.txt.
- gpios: See panel-common.txt.
- backlight: See panel-common.txt.
+- power-supply: See panel-common.txt.
- data-mirror: If set, reverse the bit order described in the data mappings
below on all data lanes, transmitting bits for slots 6 to 0 instead of
0 to 6.
diff --git a/Documentation/devicetree/bindings/display/panel/simple-panel.txt b/Documentation/devicetree/bindings/display/panel/simple-panel.txt
index 1341bbf4aa3d..16d8ff088b7d 100644
--- a/Documentation/devicetree/bindings/display/panel/simple-panel.txt
+++ b/Documentation/devicetree/bindings/display/panel/simple-panel.txt
@@ -1,7 +1,7 @@
Simple display panel
Required properties:
-- power-supply: regulator to provide the supply voltage
+- power-supply: See panel-common.txt
Optional properties:
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
diff --git a/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt b/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt
new file mode 100644
index 000000000000..b25261e63a6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt
@@ -0,0 +1,29 @@
+Tianma Micro-electronics TM070RVHG71 7.0" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "tianma,tm070rvhg71"
+- power-supply: single regulator to provide the supply voltage
+- backlight: phandle of the backlight device attached to the panel
+
+Required nodes:
+- port: LVDS port mapping to connect this display
+
+This panel needs a single power supply voltage. Its backlight is controlled
+via a PWM signal.
+
+Example:
+--------
+
+Example device-tree definition when connected to an iMX6Q based board
+
+ panel: panel-lvds0 {
+ compatible = "tianma,tm070rvhg71";
+ backlight = <&backlight_lvds>;
+ power-supply = <&reg_lvds>;
+
+ port {
+ panel_in_lvds0: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
index 7175dc3740ac..ed34253d9fb1 100644
--- a/Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt
+++ b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
@@ -2,7 +2,7 @@ Toppoly TD028TTEC1 Panel
========================
Required properties:
-- compatible: "toppoly,td028ttec1"
+- compatible: "tpo,td028ttec1"
Optional properties:
- label: a symbolic name for the panel
@@ -14,7 +14,7 @@ Example
-------
lcd-panel: td028ttec1@0 {
- compatible = "toppoly,td028ttec1";
+ compatible = "tpo,td028ttec1";
reg = <0>;
spi-max-frequency = <100000>;
spi-cpol;
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 4bbd1e9bf3be..cd48aba3bc8c 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -3,6 +3,8 @@
Required Properties:
- compatible: must be one of the following.
+ - "renesas,du-r8a7743" for R8A7743 (RZ/G1M) compatible DU
+ - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -27,10 +29,10 @@ Required Properties:
- clock-names: Name of the clocks. This property is model-dependent.
- R8A7779 uses a single functional clock. The clock doesn't need to be
named.
- - R8A779[0123456] use one functional clock per channel and one clock per
- LVDS encoder (if available). The functional clocks must be named "du.x"
- with "x" being the channel numerical index. The LVDS clocks must be
- named "lvds.x" with "x" being the LVDS encoder numerical index.
+ - All other DU instances use one functional clock per channel and one
+ clock per LVDS encoder (if available). The functional clocks must be
+ named "du.x" with "x" being the channel numerical index. The LVDS clocks
+ must be named "lvds.x" with "x" being the LVDS encoder numerical index.
- In addition to the functional and encoder clocks, all DU versions also
support externally supplied pixel clocks. Those clocks are optional.
When supplied they must be named "dclkin.x" with "x" being the input
@@ -49,16 +51,18 @@ bindings specified in Documentation/devicetree/bindings/graph.txt.
The following table lists for each supported model the port number
corresponding to each DU output.
- Port 0 Port1 Port2 Port3
+ Port0 Port1 Port2 Port3
-----------------------------------------------------------------------------
- R8A7779 (H1) DPAD 0 DPAD 1 - -
- R8A7790 (H2) DPAD LVDS 0 LVDS 1 -
- R8A7791 (M2-W) DPAD LVDS 0 - -
- R8A7792 (V2H) DPAD 0 DPAD 1 - -
- R8A7793 (M2-N) DPAD LVDS 0 - -
- R8A7794 (E2) DPAD 0 DPAD 1 - -
- R8A7795 (H3) DPAD HDMI 0 HDMI 1 LVDS
- R8A7796 (M3-W) DPAD HDMI LVDS -
+ R8A7743 (RZ/G1M) DPAD 0 LVDS 0 - -
+ R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - -
+ R8A7779 (R-Car H1) DPAD 0 DPAD 1 - -
+ R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 -
+ R8A7791 (R-Car M2-W) DPAD 0 LVDS 0 - -
+ R8A7792 (R-Car V2H) DPAD 0 DPAD 1 - -
+ R8A7793 (R-Car M2-N) DPAD 0 LVDS 0 - -
+ R8A7794 (R-Car E2) DPAD 0 DPAD 1 - -
+ R8A7795 (R-Car H3) DPAD 0 HDMI 0 HDMI 1 LVDS 0
+ R8A7796 (R-Car M3-W) DPAD 0 HDMI 0 LVDS 0 -
Example: R8A7795 (R-Car H3) ES2.0 DU
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
index 5d835d9c1ba8..eeda3597011e 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
@@ -7,6 +7,7 @@ buffer to an external LCD interface.
Required properties:
- compatible: value should be one of the following
"rockchip,rk3036-vop";
+ "rockchip,rk3126-vop";
"rockchip,rk3288-vop";
"rockchip,rk3368-vop";
"rockchip,rk3366-vop";
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
new file mode 100644
index 000000000000..f0a5090a3326
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
@@ -0,0 +1,35 @@
+Sitronix ST7735R display panels
+
+This binding is for display panels using a Sitronix ST7735R controller in SPI
+mode.
+
+Required properties:
+- compatible: "jianda,jd-t18003-t01", "sitronix,st7735r"
+- dc-gpios: Display data/command selection (D/CX)
+- reset-gpios: Reset signal (RSTX)
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+ backlight: backlight {
+ compatible = "gpio-backlight";
+ gpios = <&gpio 44 GPIO_ACTIVE_HIGH>;
+ };
+
+ ...
+
+ display@0 {
+ compatible = "jianda,jd-t18003-t01", "sitronix,st7735r";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
+ rotation = <270>;
+ backlight = &backlight;
+ };
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt b/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
index 74b5ac7b26d6..029252253ad4 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+++ b/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
@@ -10,7 +10,11 @@
- "lcd" for the clock feeding the output pixel clock & IP clock.
- resets: reset to be used by the device (defined by use of RCC macro).
Required nodes:
- - Video port for RGB output.
+ - Video port for DPI RGB output: the LTDC has one video port with up to 2
+ endpoints:
+ - for an external DPI RGB panel or bridge, using GPIOs.
+ - for the internal DPI input of the MIPI DSI host controller.
+ Note: These 2 endpoints cannot be activated simultaneously.
* STMicroelectronics STM32 DSI controller specific extensions to Synopsys
DesignWare MIPI DSI host controller
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 50cc72ee1168..cd626ee1147a 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -93,6 +93,7 @@ Required properties:
* allwinner,sun6i-a31s-tcon
* allwinner,sun7i-a20-tcon
* allwinner,sun8i-a33-tcon
+ * allwinner,sun8i-a83t-tcon-lcd
* allwinner,sun8i-v3s-tcon
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
@@ -121,6 +122,14 @@ Required properties:
On SoCs other than the A33 and V3s, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
+On SoCs that support LVDS (all SoCs but the A13, H3, H5 and V3s), you
+need one more reset line:
+ - 'lvds': The reset line driving the LVDS logic
+
+And on the A23, A31, A31s and A33, you need one more clock line:
+ - 'lvds-alt': An alternative clock source, separate from the TCON channel 0
+ clock, that can be used to drive the LVDS clock
+
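+A hedged sketch of how the two new lines might appear inside an
+LVDS-capable TCON node; the phandles and indices are hypothetical, and
+the existing "lcd" reset name and "ahb"/"tcon-ch0" clock names are
+assumed from the current binding:
+
+	resets = <&ccu 40>, <&ccu 45>;
+	reset-names = "lcd", "lvds";
+	clocks = <&ccu 46>, <&ccu 47>, <&osc24M>;
+	clock-names = "ahb", "tcon-ch0", "lvds-alt";
+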
DRC
---
@@ -216,6 +225,7 @@ supported.
Required properties:
- compatible: value must be one of:
+ * allwinner,sun8i-a83t-de2-mixer-0
* allwinner,sun8i-v3s-de2-mixer
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the mixer
@@ -245,6 +255,7 @@ Required properties:
* allwinner,sun6i-a31s-display-engine
* allwinner,sun7i-a20-display-engine
* allwinner,sun8i-a33-display-engine
+ * allwinner,sun8i-a83t-display-engine
* allwinner,sun8i-v3s-display-engine
- allwinner,pipelines: list of phandle to the display engine
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
index 844e0103fb0d..593be44a53c9 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
@@ -206,21 +206,33 @@ of the following host1x client modules:
- "nvidia,tegra132-sor": for Tegra132
- "nvidia,tegra210-sor": for Tegra210
- "nvidia,tegra210-sor1": for Tegra210
+ - "nvidia,tegra186-sor": for Tegra186
+ - "nvidia,tegra186-sor1": for Tegra186
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- clocks: Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- sor: clock input for the SOR hardware
- - source: source clock for the SOR clock
+ - out: SOR output clock
- parent: input for the pixel clock
- dp: reference clock for the SOR clock
- safe: safe reference for the SOR clock during power up
+
+ For Tegra186 and later:
+ - pad: SOR pad output clock
+
+ Obsolete:
+ - source: source clock for the SOR clock (use "out" instead)
+
- resets: Must contain an entry for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names: Must include the following entries:
- sor
+ Required properties on Tegra186 and later:
+ - nvidia,interface: index of the SOR interface
+
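+ A hedged fragment showing the Tegra186 clock naming and the new
+ required property; the unit address and the clock/reset provider
+ phandles and indices are hypothetical, while the name strings come
+ from the lists above:
+
+	sor@15540000 {
+		compatible = "nvidia,tegra186-sor";
+		clocks = <&clks 0>, <&clks 1>, <&clks 2>,
+			 <&clks 3>, <&clks 4>, <&clks 5>;
+		clock-names = "sor", "out", "parent", "dp", "safe", "pad";
+		resets = <&rsts 0>;
+		reset-names = "sor";
+		nvidia,interface = <0>;
+	};
+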
Optional properties:
- nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
diff --git a/Documentation/devicetree/bindings/display/ti/ti,dra7-dss.txt b/Documentation/devicetree/bindings/display/ti/ti,dra7-dss.txt
index c30f9ec189ed..91279f1060fe 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,dra7-dss.txt
+++ b/Documentation/devicetree/bindings/display/ti/ti,dra7-dss.txt
@@ -47,6 +47,11 @@ Required properties:
- clocks: handle to fclk
- clock-names: "fck"
+Optional properties:
+- max-memory-bandwidth: Input memory (from main memory to dispc) bandwidth limit
+ in bytes per second
+
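+The new property takes a plain bytes-per-second value; a hedged fragment
+(the dispc label and the limit shown are arbitrary examples):
+
+	&dispc {
+		max-memory-bandwidth = <230000000>;	/* ~230 MB/s */
+	};
+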
HDMI
----
diff --git a/Documentation/devicetree/bindings/display/ti/ti,omap2-dss.txt b/Documentation/devicetree/bindings/display/ti/ti,omap2-dss.txt
index afcd5a86c6a4..ee867c4d1152 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,omap2-dss.txt
+++ b/Documentation/devicetree/bindings/display/ti/ti,omap2-dss.txt
@@ -28,6 +28,10 @@ Required properties:
- ti,hwmods: "dss_dispc"
- interrupts: the DISPC interrupt
+Optional properties:
+- max-memory-bandwidth: Input memory (from main memory to dispc) bandwidth limit
+ in bytes per second
+
RFBI
----
diff --git a/Documentation/devicetree/bindings/display/ti/ti,omap3-dss.txt b/Documentation/devicetree/bindings/display/ti/ti,omap3-dss.txt
index dc66e1447c31..cd02516a40b6 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,omap3-dss.txt
+++ b/Documentation/devicetree/bindings/display/ti/ti,omap3-dss.txt
@@ -37,6 +37,10 @@ Required properties:
- clocks: handle to fclk
- clock-names: "fck"
+Optional properties:
+- max-memory-bandwidth: Input memory (from main memory to dispc) bandwidth limit
+ in bytes per second
+
RFBI
----
diff --git a/Documentation/devicetree/bindings/display/ti/ti,omap4-dss.txt b/Documentation/devicetree/bindings/display/ti/ti,omap4-dss.txt
index bc624db8888d..0f85f6b3a5a8 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,omap4-dss.txt
+++ b/Documentation/devicetree/bindings/display/ti/ti,omap4-dss.txt
@@ -36,6 +36,10 @@ Required properties:
- clocks: handle to fclk
- clock-names: "fck"
+Optional properties:
+- max-memory-bandwidth: Input memory (from main memory to dispc) bandwidth limit
+ in bytes per second
+
RFBI
----
diff --git a/Documentation/devicetree/bindings/display/ti/ti,omap5-dss.txt b/Documentation/devicetree/bindings/display/ti/ti,omap5-dss.txt
index 118a486c47bb..20861218649f 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,omap5-dss.txt
+++ b/Documentation/devicetree/bindings/display/ti/ti,omap5-dss.txt
@@ -36,6 +36,10 @@ Required properties:
- clocks: handle to fclk
- clock-names: "fck"
+Optional properties:
+- max-memory-bandwidth: Input memory (from main memory to dispc) bandwidth limit
+ in bytes per second
+
RFBI
----
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 60f56762698b..b1fa64a1d4d0 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -156,6 +156,7 @@ i2se I2SE GmbH
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
ifi Ingenieurburo Fur Ic-Technologie (I/F/I)
+ilitek ILI Technology Corporation (ILITEK)
img Imagination Technologies Ltd.
infineon Infineon Technologies
inforce Inforce Computing
@@ -174,6 +175,7 @@ itead ITEAD Intelligent Systems Co.Ltd
iwave iWave Systems Technologies Pvt. Ltd.
jdi Japan Display Inc.
jedec JEDEC Solid State Technology Association
+jianda Jiandangjing Technology Co., Ltd.
karo Ka-Ro electronics GmbH
keithkoep Keith & Koep GmbH
keymile Keymile GmbH
@@ -383,6 +385,7 @@ virtio Virtual I/O Device Specification, developed by the OASIS consortium
vivante Vivante Corporation
vocore VoCore Studio
voipac Voipac Technologies s.r.o.
+vot Vision Optical Technology Co., Ltd.
wd Western Digital Corp.
wetek WeTek Electronics, limited.
wexler Wexler
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 13dd237418cc..e37557b30f62 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -74,15 +74,6 @@ Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
:export:
-Legacy CRTC/Modeset Helper Functions Reference
-==============================================
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
- :doc: overview
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
- :export:
-
Simple KMS Helper Reference
===========================
@@ -163,6 +154,9 @@ Panel Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_panel.c
:export:
+.. kernel-doc:: drivers/gpu/drm/drm_panel_orientation_quirks.c
+ :export:
+
Display Port Helper Functions Reference
=======================================
@@ -279,15 +273,6 @@ Flip-work Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
:export:
-Plane Helper Reference
-======================
-
-.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
- :doc: overview
-
-.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
- :export:
-
Auxiliary Modeset Helpers
=========================
@@ -305,3 +290,21 @@ Framebuffer GEM Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
:export:
+
+Legacy Plane Helper Reference
+=============================
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
+ :export:
+
+Legacy CRTC/Modeset Helper Functions Reference
+==============================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
+ :export:
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 307284125d7a..2dcf5b42015d 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -263,14 +263,20 @@ Taken all together there's two consequences for the atomic design:
- An atomic update is assembled and validated as an entirely free-standing pile
of structures within the :c:type:`drm_atomic_state <drm_atomic_state>`
- container. Again drivers can subclass that container for their own state
- structure tracking needs. Only when a state is committed is it applied to the
- driver and modeset objects. This way rolling back an update boils down to
- releasing memory and unreferencing objects like framebuffers.
+ container. Driver private state structures are also tracked in the same
+ structure; see the next chapter. Only when a state is committed is it applied
+ to the driver and modeset objects. This way rolling back an update boils down
+ to releasing memory and unreferencing objects like framebuffers.
Read on in this chapter, and also in :ref:`drm_atomic_helper` for more detailed
coverage of specific topics.
+Handling Driver Private State
+-----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+ :doc: handling driver private state
+
Atomic Mode Setting Function Reference
--------------------------------------
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index e94d3ac2bdd0..41dc881b00dc 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -347,10 +347,10 @@ GuC-specific firmware loader
GuC-based command submission
----------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_submission.c
:doc: GuC-based command submission
-.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_submission.c
:internal:
GuC Firmware Layout
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 36625aa66c27..1e593370f64f 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -179,8 +179,39 @@ don't do this, drivers used dev_info/warn/err to make this differentiation. We
now have DRM_DEV_* variants of the drm print macros, so we can start to convert
those drivers back to using drm-formatted device-specific log messages.
+Before you start this conversion please contact the relevant maintainers to make
+sure your work will be merged - not everyone agrees that the DRM dmesg macros
+are better.
+
Contact: Sean Paul, Maintainer of the driver you plan to convert
+Convert drivers to use simple modeset suspend/resume
+----------------------------------------------------
+
+Most drivers (except i915 and nouveau) that use
+drm_atomic_helper_suspend/resume() can probably be converted to use
+drm_mode_config_helper_suspend/resume().
+
+Contact: Maintainer of the driver you plan to convert
+
+Convert drivers to use drm_fb_helper_fbdev_setup/teardown()
+-----------------------------------------------------------
+
+Most drivers can use drm_fb_helper_fbdev_setup() except maybe:
+
+- amdgpu which has special logic to decide whether to call
+ drm_helper_disable_unused_functions()
+
+- armada which isn't atomic and doesn't call
+ drm_helper_disable_unused_functions()
+
+- i915 which calls drm_fb_helper_initial_config() in a worker
+
+Drivers that use drm_framebuffer_remove() to clean up the fbdev framebuffer can
+probably use drm_fb_helper_fbdev_teardown().
+
+Contact: Maintainer of the driver you plan to convert
+
Core refactorings
=================
@@ -382,11 +413,6 @@ those drivers as simple as possible, so lots of room for refactoring:
one of the ideas for having a shared dsi/dbi helper, abstracting away the
transport details more.
-- tinydrm_lastclose could be drm_fb_helper_lastclose. Only thing we need
- for that is to store the drm_fb_helper pointer somewhere in
- drm_device->mode_config. And then we could roll that out to all the
- drivers.
-
- tinydrm_gem_cma_prime_import_sg_table should probably go into the cma
helpers, as a _vmapped variant (since not every driver needs the vmap).
- also rework the drm_framebuffer_funcs->dirty hook wire-up, see above. And tinydrm_gem_cma_free_object could then be merged into
@@ -400,11 +426,6 @@ those drivers as simple as possible, so lots of room for refactoring:
a drm_device wrong. Doesn't matter, since everyone else gets it wrong
too :-)
-- With the fbdev pointer in dev->mode_config we could also make
- suspend/resume helpers entirely generic, at least if we add a
- dev->mode_config.suspend_state. We could even provide a generic pm_ops
- structure with those.
-
- also rework the drm_framebuffer_funcs->dirty hook wire-up, see above.
Contact: Noralf Trønnes, Daniel Vetter
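
For the "simple modeset suspend/resume" conversion entry above, the
converted driver code typically reduces to a sketch like this (the foo_
names are hypothetical; the two helpers are the ones named in the entry):

	static int foo_drm_suspend(struct device *dev)
	{
		/* suspend fbdev and disable CRTCs, keeping the atomic state */
		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
	}

	static int foo_drm_resume(struct device *dev)
	{
		/* restore the kept atomic state and resume fbdev */
		return drm_mode_config_helper_resume(dev_get_drvdata(dev));
	}

	static SIMPLE_DEV_PM_OPS(foo_drm_pm_ops, foo_drm_suspend, foo_drm_resume);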
diff --git a/MAINTAINERS b/MAINTAINERS
index 2bd08becdd16..4c104dbc746d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4486,6 +4486,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/tve200/
+DRM DRIVER FOR ILITEK ILI9225 PANELS
+M: David Lechner <david@lechnology.com>
+S: Maintained
+F: drivers/gpu/drm/tinydrm/ili9225.c
+F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt
+
DRM DRIVER FOR INTEL I810 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/i810/
@@ -4572,6 +4578,12 @@ S: Maintained
F: drivers/gpu/drm/tinydrm/st7586.c
F: Documentation/devicetree/bindings/display/st7586.txt
+DRM DRIVER FOR SITRONIX ST7735R PANELS
+M: David Lechner <david@lechnology.com>
+S: Maintained
+F: drivers/gpu/drm/tinydrm/st7735r.c
+F: Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+
DRM DRIVER FOR TDFX VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/tdfx/
@@ -4611,7 +4623,7 @@ F: include/linux/vga*
DRM DRIVERS AND MISC GPU PATCHES
M: Daniel Vetter <daniel.vetter@intel.com>
-M: Jani Nikula <jani.nikula@linux.intel.com>
+M: Gustavo Padovan <gustavo@padovan.org>
M: Sean Paul <seanpaul@chromium.org>
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
S: Maintained
@@ -4740,7 +4752,8 @@ F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
F: Documentation/devicetree/bindings/display/renesas,du.txt
DRM DRIVERS FOR ROCKCHIP
-M: Mark Yao <mark.yao@rock-chips.com>
+M: Sandy Huang <hjc@rock-chips.com>
+M: Heiko Stübner <heiko@sntech.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/rockchip/
@@ -4828,6 +4841,15 @@ S: Maintained
F: drivers/gpu/drm/tinydrm/
F: include/drm/tinydrm/
+DRM TTM SUBSYSTEM
+M: Christian Koenig <christian.koenig@amd.com>
+M: Roger He <Hongbo.He@amd.com>
+T: git git://people.freedesktop.org/~agd5f/linux
+S: Maintained
+L: dri-devel@lists.freedesktop.org
+F: include/drm/ttm/
+F: drivers/gpu/drm/ttm/
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -7056,7 +7078,7 @@ M: Zhi Wang <zhi.a.wang@intel.com>
L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
-T: git https://github.com/01org/gvt-linux.git
+T: git https://github.com/intel/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/
@@ -11426,6 +11448,7 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
+M: David (ChunMing) Zhou <David1.Zhou@amd.com>
L: amd-gfx@lists.freedesktop.org
T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index 7d87437bd030..3de0489deade 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -147,6 +147,18 @@ int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb);
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
/**
+ * iosf_mbi_unregister_pmic_bus_access_notifier_unlocked - Unregister PMIC bus
+ * notifier, unlocked
+ *
+ * Like iosf_mbi_unregister_pmic_bus_access_notifier(), but for use when the
+ * caller has already called iosf_mbi_punit_acquire() itself.
+ *
+ * @nb: notifier_block to unregister
+ */
+int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ struct notifier_block *nb);
+
+/**
* iosf_mbi_call_pmic_bus_access_notifier_chain - Call PMIC bus notifier chain
*
* @val: action to pass into listener's notifier_call function
@@ -154,6 +166,11 @@ int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
*/
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v);
+/**
+ * iosf_mbi_assert_punit_acquired - Assert that the P-Unit has been acquired.
+ */
+void iosf_mbi_assert_punit_acquired(void);
+
#else /* CONFIG_IOSF_MBI is not enabled */
static inline
bool iosf_mbi_available(void)
@@ -197,12 +214,20 @@ int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
return 0;
}
+static inline int
+iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
+{
+ return 0;
+}
+
static inline
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
{
return 0;
}
+static inline void iosf_mbi_assert_punit_acquired(void) {}
+
#endif /* CONFIG_IOSF_MBI */
#endif /* IOSF_MBI_SYMS_H */
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 1e82f787c160..bae0d32e327b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -243,7 +243,7 @@ static void __init intel_remapping_check(int num, int slot, int func)
#define KB(x) ((x) * 1024UL)
#define MB(x) (KB (KB (x)))
-static size_t __init i830_tseg_size(void)
+static resource_size_t __init i830_tseg_size(void)
{
u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
@@ -256,7 +256,7 @@ static size_t __init i830_tseg_size(void)
return KB(512);
}
-static size_t __init i845_tseg_size(void)
+static resource_size_t __init i845_tseg_size(void)
{
u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;
@@ -273,7 +273,7 @@ static size_t __init i845_tseg_size(void)
return 0;
}
-static size_t __init i85x_tseg_size(void)
+static resource_size_t __init i85x_tseg_size(void)
{
u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
@@ -283,12 +283,12 @@ static size_t __init i85x_tseg_size(void)
return MB(1);
}
-static size_t __init i830_mem_size(void)
+static resource_size_t __init i830_mem_size(void)
{
return read_pci_config_byte(0, 0, 0, I830_DRB3) * MB(32);
}
-static size_t __init i85x_mem_size(void)
+static resource_size_t __init i85x_mem_size(void)
{
return read_pci_config_byte(0, 0, 1, I85X_DRB3) * MB(32);
}
@@ -297,36 +297,36 @@ static size_t __init i85x_mem_size(void)
* On 830/845/85x the stolen memory base isn't available in any
* register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
*/
-static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
- size_t stolen_size)
+static resource_size_t __init i830_stolen_base(int num, int slot, int func,
+ resource_size_t stolen_size)
{
- return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
+ return i830_mem_size() - i830_tseg_size() - stolen_size;
}
-static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
- size_t stolen_size)
+static resource_size_t __init i845_stolen_base(int num, int slot, int func,
+ resource_size_t stolen_size)
{
- return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
+ return i830_mem_size() - i845_tseg_size() - stolen_size;
}
-static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
- size_t stolen_size)
+static resource_size_t __init i85x_stolen_base(int num, int slot, int func,
+ resource_size_t stolen_size)
{
- return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
+ return i85x_mem_size() - i85x_tseg_size() - stolen_size;
}
-static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
- size_t stolen_size)
+static resource_size_t __init i865_stolen_base(int num, int slot, int func,
+ resource_size_t stolen_size)
{
u16 toud = 0;
toud = read_pci_config_16(0, 0, 0, I865_TOUD);
- return (phys_addr_t)(toud << 16) + i845_tseg_size();
+ return toud * KB(64) + i845_tseg_size();
}
-static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
- size_t stolen_size)
+static resource_size_t __init gen3_stolen_base(int num, int slot, int func,
+ resource_size_t stolen_size)
{
u32 bsm;
@@ -337,10 +337,10 @@ static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
*/
bsm = read_pci_config(num, slot, func, INTEL_BSM);
- return (phys_addr_t)bsm & INTEL_BSM_MASK;
+ return bsm & INTEL_BSM_MASK;
}
-static size_t __init i830_stolen_size(int num, int slot, int func)
+static resource_size_t __init i830_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
u16 gms;
@@ -361,7 +361,7 @@ static size_t __init i830_stolen_size(int num, int slot, int func)
return 0;
}
-static size_t __init gen3_stolen_size(int num, int slot, int func)
+static resource_size_t __init gen3_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
u16 gms;
@@ -390,7 +390,7 @@ static size_t __init gen3_stolen_size(int num, int slot, int func)
return 0;
}
-static size_t __init gen6_stolen_size(int num, int slot, int func)
+static resource_size_t __init gen6_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
u16 gms;
@@ -398,10 +398,10 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
- return (size_t)gms * MB(32);
+ return gms * MB(32);
}
-static size_t __init gen8_stolen_size(int num, int slot, int func)
+static resource_size_t __init gen8_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
u16 gms;
@@ -409,10 +409,10 @@ static size_t __init gen8_stolen_size(int num, int slot, int func)
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
- return (size_t)gms * MB(32);
+ return gms * MB(32);
}
-static size_t __init chv_stolen_size(int num, int slot, int func)
+static resource_size_t __init chv_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
u16 gms;
@@ -426,14 +426,14 @@ static size_t __init chv_stolen_size(int num, int slot, int func)
* 0x17 to 0x1d: 4MB increments start at 36MB
*/
if (gms < 0x11)
- return (size_t)gms * MB(32);
+ return gms * MB(32);
else if (gms < 0x17)
- return (size_t)(gms - 0x11 + 2) * MB(4);
+ return (gms - 0x11) * MB(4) + MB(8);
else
- return (size_t)(gms - 0x17 + 9) * MB(4);
+ return (gms - 0x17) * MB(4) + MB(36);
}
-static size_t __init gen9_stolen_size(int num, int slot, int func)
+static resource_size_t __init gen9_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
u16 gms;
@@ -444,14 +444,15 @@ static size_t __init gen9_stolen_size(int num, int slot, int func)
/* 0x0 to 0xef: 32MB increments starting at 0MB */
/* 0xf0 to 0xfe: 4MB increments starting at 4MB */
if (gms < 0xf0)
- return (size_t)gms * MB(32);
+ return gms * MB(32);
else
- return (size_t)(gms - 0xf0 + 1) * MB(4);
+ return (gms - 0xf0) * MB(4) + MB(4);
}
struct intel_early_ops {
- size_t (*stolen_size)(int num, int slot, int func);
- phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
+ resource_size_t (*stolen_size)(int num, int slot, int func);
+ resource_size_t (*stolen_base)(int num, int slot, int func,
+ resource_size_t size);
};
static const struct intel_early_ops i830_early_ops __initconst = {
@@ -527,16 +528,20 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_SKL_IDS(&gen9_early_ops),
INTEL_BXT_IDS(&gen9_early_ops),
INTEL_KBL_IDS(&gen9_early_ops),
+ INTEL_CFL_IDS(&gen9_early_ops),
INTEL_GLK_IDS(&gen9_early_ops),
INTEL_CNL_IDS(&gen9_early_ops),
};
+struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
+EXPORT_SYMBOL(intel_graphics_stolen_res);
+
static void __init
intel_graphics_stolen(int num, int slot, int func,
const struct intel_early_ops *early_ops)
{
- phys_addr_t base, end;
- size_t size;
+ resource_size_t base, size;
+ resource_size_t end;
size = early_ops->stolen_size(num, slot, func);
base = early_ops->stolen_base(num, slot, func, size);
@@ -545,8 +550,12 @@ intel_graphics_stolen(int num, int slot, int func,
return;
end = base + size - 1;
- printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
- &base, &end);
+
+ intel_graphics_stolen_res.start = base;
+ intel_graphics_stolen_res.end = end;
+
+ printk(KERN_INFO "Reserving Intel graphics memory at %pR\n",
+ &intel_graphics_stolen_res);
/* Mark this space as reserved */
e820__range_add(base, size, E820_TYPE_RESERVED);
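
As a sanity check on the rewritten CHV and gen9 stolen-size decodings
above, a few worked values; each matches the old
"(gms - base + off) * unit" expression exactly, minus the size_t casts:

	/*
	 * CHV,  gms = 0x10:  0x10 * MB(32)                  = 512 MB
	 * CHV,  gms = 0x11:  (0x11 - 0x11) * MB(4) + MB(8)  =   8 MB
	 * CHV,  gms = 0x17:  (0x17 - 0x17) * MB(4) + MB(36) =  36 MB
	 * gen9, gms = 0xf0:  (0xf0 - 0xf0) * MB(4) + MB(4)  =   4 MB
	 */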
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index a952ac199741..6f37a2137a79 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -218,14 +218,23 @@ int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(iosf_mbi_register_pmic_bus_access_notifier);
+int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ struct notifier_block *nb)
+{
+ iosf_mbi_assert_punit_acquired();
+
+ return blocking_notifier_chain_unregister(
+ &iosf_mbi_pmic_bus_access_notifier, nb);
+}
+EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier_unlocked);
+
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
{
int ret;
/* Wait for the bus to go inactive before unregistering */
mutex_lock(&iosf_mbi_punit_mutex);
- ret = blocking_notifier_chain_unregister(
- &iosf_mbi_pmic_bus_access_notifier, nb);
+ ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
mutex_unlock(&iosf_mbi_punit_mutex);
return ret;
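
A sketch of the calling pattern the new _unlocked variant enables: the
caller already holds the P-Unit via iosf_mbi_punit_acquire() (declared
in the same header), so the locked variant above would deadlock on
iosf_mbi_punit_mutex. Everything except the iosf_mbi_* calls is
hypothetical:

	static void foo_teardown(struct foo_priv *priv)
	{
		iosf_mbi_punit_acquire();
		/* ... work that must not race with PMIC bus accesses ... */
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(&priv->nb);
		iosf_mbi_punit_release();
	}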
@@ -239,6 +248,12 @@ int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
}
EXPORT_SYMBOL(iosf_mbi_call_pmic_bus_access_notifier_chain);
+void iosf_mbi_assert_punit_acquired(void)
+{
+ WARN_ON(!mutex_is_locked(&iosf_mbi_punit_mutex));
+}
+EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired);
+
#ifdef CONFIG_IOSF_MBI_DEBUG
static u32 dbg_mdr;
static u32 dbg_mcr;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 784ff0003c1b..29b0eb452b3a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -231,6 +231,7 @@ config DMA_SHARED_BUFFER
bool
default n
select ANON_INODES
+ select IRQ_WORK
help
This option enables the framework for buffer-sharing between
multiple drivers. A buffer is associated with a file using driver
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9b6b6023193b..c6271ce250b3 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -80,7 +80,7 @@ static struct _intel_private {
unsigned int needs_dmar : 1;
phys_addr_t gma_bus_addr;
/* Size of memory reserved for graphics by the BIOS */
- unsigned int stolen_size;
+ resource_size_t stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
@@ -333,13 +333,13 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
-static unsigned int intel_gtt_stolen_size(void)
+static resource_size_t intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
- unsigned int stolen_size = 0;
+ resource_size_t stolen_size = 0;
if (INTEL_GTT_GEN == 1)
return 0; /* no stolen mem on i81x */
@@ -417,8 +417,8 @@ static unsigned int intel_gtt_stolen_size(void)
}
if (stolen_size > 0) {
- dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
- stolen_size / KB(1), local ? "local" : "stolen");
+ dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n",
+ (u64)stolen_size / KB(1), local ? "local" : "stolen");
} else {
dev_info(&intel_private.bridge_dev->dev,
"no pre-allocated video memory detected\n");
@@ -872,6 +872,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
}
}
wmb();
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
@@ -1422,12 +1424,10 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
EXPORT_SYMBOL(intel_gmch_probe);
void intel_gtt_get(u64 *gtt_total,
- u32 *stolen_size,
phys_addr_t *mappable_base,
- u64 *mappable_end)
+ resource_size_t *mappable_end)
{
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
- *stolen_size = intel_private.stolen_size;
*mappable_base = intel_private.gma_bus_addr;
*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 12b62d0aac27..539450713838 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -351,13 +351,13 @@ static inline int is_dma_buf_file(struct file *file)
*
* 2. Userspace passes this file-descriptors to all drivers it wants this buffer
* to share with: First the filedescriptor is converted to a &dma_buf using
- * dma_buf_get(). The the buffer is attached to the device using
+ * dma_buf_get(). Then the buffer is attached to the device using
* dma_buf_attach().
*
* Up to this stage the exporter is still free to migrate or reallocate the
* backing storage.
*
- * 3. Once the buffer is attached to all devices userspace can inniate DMA
+ * 3. Once the buffer is attached to all devices userspace can initiate DMA
* access to the shared buffer. In the kernel this is done by calling
* dma_buf_map_attachment() and dma_buf_unmap_attachment().
*
@@ -617,7 +617,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
* on error. May return -EINTR if it is interrupted by a signal.
*
- * A mapping must be unmapped again using dma_buf_map_attachment(). Note that
+ * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
* time.
@@ -1179,8 +1179,7 @@ static int dma_buf_init_debugfs(void)
static void dma_buf_uninit_debugfs(void)
{
- if (dma_buf_debugfs_dir)
- debugfs_remove_recursive(dma_buf_debugfs_dir);
+ debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 0350829ba62e..dd1edfb27b61 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -31,6 +31,14 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
return "unbound";
}
+static void irq_dma_fence_array_work(struct irq_work *wrk)
+{
+ struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+
+ dma_fence_signal(&array->base);
+ dma_fence_put(&array->base);
+}
+
static void dma_fence_array_cb_func(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -39,8 +47,9 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
struct dma_fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
- dma_fence_signal(&array->base);
- dma_fence_put(&array->base);
+ irq_work_queue(&array->work);
+ else
+ dma_fence_put(&array->base);
}
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
@@ -136,6 +145,7 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
spin_lock_init(&array->lock);
dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
context, seqno);
+ init_irq_work(&array->work, irq_dma_fence_array_work);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
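
For reference, a hedged sketch of building a fence array with the
(unchanged) public API; after this patch the final signal is delivered
from irq_work rather than directly from the last fence callback. The num
and fences variables are assumed to be set up by the caller:

	struct dma_fence_array *array;

	/* combine num fences; signals only once all of them have signaled */
	array = dma_fence_array_create(num, fences,
				       dma_fence_context_alloc(1), 1,
				       false);
	if (!array)
		return -ENOMEM;	/* on failure the caller keeps its fences[] refs */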
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index b44d9d7db347..04ebe2204c12 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -104,7 +104,8 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
struct dma_fence *fence)
{
- u32 i;
+ struct dma_fence *signaled = NULL;
+ u32 i, signaled_idx;
dma_fence_get(fence);
@@ -126,17 +127,28 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
dma_fence_put(old_fence);
return;
}
+
+ if (!signaled && dma_fence_is_signaled(old_fence)) {
+ signaled = old_fence;
+ signaled_idx = i;
+ }
}
/*
* memory barrier is added by write_seqcount_begin,
* fobj->shared_count is protected by this lock too
*/
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ if (signaled) {
+ RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+ } else {
+ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+ fobj->shared_count++;
+ }
write_seqcount_end(&obj->seq);
preempt_enable();
+
+ dma_fence_put(signaled);
}
static void
@@ -145,8 +157,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *fobj,
struct dma_fence *fence)
{
- unsigned i;
- struct dma_fence *old_fence = NULL;
+ unsigned i, j, k;
dma_fence_get(fence);
@@ -162,24 +173,21 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
* references from the old struct are carried over to
* the new.
*/
- fobj->shared_count = old->shared_count;
-
- for (i = 0; i < old->shared_count; ++i) {
+ for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
struct dma_fence *check;
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
- if (!old_fence && check->context == fence->context) {
- old_fence = check;
- RCU_INIT_POINTER(fobj->shared[i], fence);
- } else
- RCU_INIT_POINTER(fobj->shared[i], check);
- }
- if (!old_fence) {
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ if (check->context == fence->context ||
+ dma_fence_is_signaled(check))
+ RCU_INIT_POINTER(fobj->shared[--k], check);
+ else
+ RCU_INIT_POINTER(fobj->shared[j++], check);
}
+ fobj->shared_count = j;
+ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+ fobj->shared_count++;
done:
preempt_disable();
@@ -192,10 +200,18 @@ done:
write_seqcount_end(&obj->seq);
preempt_enable();
- if (old)
- kfree_rcu(old, rcu);
+ if (!old)
+ return;
- dma_fence_put(old_fence);
+ /* Drop the references to the signaled fences */
+ for (i = k; i < fobj->shared_max; ++i) {
+ struct dma_fence *f;
+
+ f = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(obj));
+ dma_fence_put(f);
+ }
+ kfree_rcu(old, rcu);
}
/**
@@ -318,7 +334,7 @@ retry:
continue;
}
- dst_list->shared[dst_list->shared_count++] = fence;
+ rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
}
} else {
dst_list = NULL;
@@ -455,13 +471,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
unsigned long timeout)
{
struct dma_fence *fence;
- unsigned seq, shared_count, i = 0;
+ unsigned seq, shared_count;
long ret = timeout ? timeout : 1;
+ int i;
retry:
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
rcu_read_lock();
+ i = -1;
fence = rcu_dereference(obj->fence_excl);
if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
@@ -477,14 +495,14 @@ retry:
fence = NULL;
}
- if (!fence && wait_all) {
+ if (wait_all) {
struct reservation_object_list *fobj =
rcu_dereference(obj->fence);
if (fobj)
shared_count = fobj->shared_count;
- for (i = 0; i < shared_count; ++i) {
+ for (i = 0; !fence && i < shared_count; ++i) {
struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
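The reservation.c changes above stop growing the shared-fence array unconditionally: add_shared_inplace reuses the slot of an already-signaled fence when one exists, and add_shared_replace partitions the old array while copying, packing still-active fences at the front and parking signaled (or same-context) ones at the back so their references can be dropped once the new array is published. A minimal userspace sketch of that front/back partition, assuming a simplified refcounted fence type (the struct and helpers here are illustrative, not the kernel's dma_fence API):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct dma_fence: a refcount and a flag. */
struct fence {
	int refcount;
	int signaled;
	int id;
};

static void fence_put(struct fence *f)
{
	if (f && --f->refcount == 0)
		printf("fence %d freed\n", f->id);
}

/*
 * Partition old[] into new_arr[]: unsignaled fences are packed at the
 * front (indices 0..j-1), signaled ones parked at the back (k..max-1).
 * Mirrors the i/j/k walk in reservation_object_add_shared_replace.
 */
static unsigned partition(struct fence **old, unsigned count,
			  struct fence **new_arr, unsigned max,
			  unsigned *back)
{
	unsigned i, j = 0, k = max;

	for (i = 0; i < count; ++i) {
		if (old[i]->signaled)
			new_arr[--k] = old[i];
		else
			new_arr[j++] = old[i];
	}
	*back = k;
	return j;	/* the new shared_count, before appending */
}

int main(void)
{
	struct fence f[4] = {
		{ 1, 0, 0 }, { 1, 1, 1 }, { 1, 0, 2 }, { 1, 1, 3 },
	};
	struct fence *old[4] = { &f[0], &f[1], &f[2], &f[3] };
	struct fence *new_arr[8];
	unsigned back, count, i;

	count = partition(old, 4, new_arr, 8, &back);
	printf("live fences: %u\n", count);

	/* After publication, drop the references parked at the back. */
	for (i = back; i < 8; ++i)
		fence_put(new_arr[i]);
	return 0;
}

One allocation does double duty here: entries below shared_count are live, entries at and above k are garbage awaiting release, which is exactly why the real patch can defer the dma_fence_put() calls until after write_seqcount_end().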
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 4d9f21831741..deeefa7a1773 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,7 @@
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
+ select DRM_PANEL_ORIENTATION_QUIRKS
select HDMI
select FB_CMDLINE
select I2C
@@ -149,6 +150,10 @@ config DRM_VM
bool
depends on DRM && MMU
+config DRM_SCHED
+ tristate
+ depends on DRM
+
source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
@@ -178,6 +183,7 @@ config DRM_AMDGPU
depends on DRM && PCI && MMU
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_SCHED
select DRM_TTM
select POWER_SUPPLY
select HWMON
@@ -362,6 +368,10 @@ config DRM_SAVAGE
endif # DRM_LEGACY
+# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
+config DRM_PANEL_ORIENTATION_QUIRKS
+ tristate
+
config DRM_LIB_RANDOM
bool
default n
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e9500844333e..50093ff4479b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -47,8 +47,10 @@ obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-$(CONFIG_DRM_ARM) += arm/
obj-$(CONFIG_DRM_TTM) += ttm/
+obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-y += amd/lib/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 90202cf4cd1e..d6e5b7273853 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -52,7 +52,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \
+ amdgpu_ids.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -62,7 +63,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
amdgpu-y += \
- vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o
+ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
# add GMC block
amdgpu-y += \
@@ -135,10 +136,7 @@ amdgpu-y += \
amdgpu-y += amdgpu_cgs.o
# GPU scheduler
-amdgpu-y += \
- ../scheduler/gpu_scheduler.o \
- ../scheduler/sched_fence.o \
- amdgpu_job.o
+amdgpu-y += amdgpu_job.o
# ACP component
ifneq ($(CONFIG_DRM_AMD_ACP),)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0b14b5373783..d5a2eefd6c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -45,8 +45,11 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
+#include <drm/gpu_scheduler.h>
#include <kgd_kfd_interface.h>
+#include "dm_pp_interface.h"
+#include "kgd_pp_interface.h"
#include "amd_shared.h"
#include "amdgpu_mode.h"
@@ -59,7 +62,6 @@
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
-#include "amd_powerplay.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
@@ -67,10 +69,9 @@
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_dm.h"
-
-#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
+#include "amdgpu_debugfs.h"
/*
* Modules parameters.
@@ -125,6 +126,7 @@ extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -177,6 +179,10 @@ extern int amdgpu_cik_support;
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
+/* GPU RESET flags */
+#define AMDGPU_RESET_INFO_VRAM_LOST (1 << 0)
+#define AMDGPU_RESET_INFO_FULLRESET (1 << 1)
+
struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
@@ -218,17 +224,18 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_LAST
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state);
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state);
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
+int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state);
+int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state);
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags);
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
#define AMDGPU_MAX_IP_NUM 16
@@ -253,15 +260,16 @@ struct amdgpu_ip_block {
const struct amdgpu_ip_block_version *version;
};
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor);
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor);
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version);
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -341,8 +349,9 @@ struct amdgpu_gart_funcs {
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
/* get the pde for a given mc addr */
- u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
- uint32_t (*get_invalidate_req)(unsigned int vm_id);
+ void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+ u64 *dst, u64 *flags);
+ uint32_t (*get_invalidate_req)(unsigned int vmid);
};
/* provided by the ih block */
@@ -368,9 +377,6 @@ struct amdgpu_dummy_page {
struct page *page;
dma_addr_t addr;
};
-int amdgpu_dummy_page_init(struct amdgpu_device *adev);
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev);
-
/*
* Clocks
@@ -418,7 +424,6 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
 * By conception this is a helper for other parts of the driver
@@ -535,6 +540,7 @@ struct amdgpu_mc {
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
+ bool translate_further;
};
/*
@@ -645,12 +651,6 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_INVALID = 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
-
/*
* IRQS.
*/
@@ -684,7 +684,7 @@ struct amdgpu_ib {
uint32_t flags;
};
-extern const struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct drm_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
@@ -694,7 +694,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f);
/*
@@ -727,7 +727,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
struct dma_fence **fences;
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
};
struct amdgpu_ctx {
@@ -735,14 +735,16 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
struct amdgpu_queue_mgr queue_mgr;
unsigned reset_counter;
+ unsigned reset_counter_query;
uint32_t vram_lost_counter;
spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
- enum amd_sched_priority init_priority;
- enum amd_sched_priority override_priority;
+ enum drm_sched_priority init_priority;
+ enum drm_sched_priority override_priority;
struct mutex lock;
+ atomic_t guilty;
};
struct amdgpu_ctx_mgr {
@@ -760,7 +762,7 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -957,6 +959,7 @@ struct amdgpu_gfx_config {
};
struct amdgpu_cu_info {
+ uint32_t simd_per_cu;
uint32_t max_waves_per_simd;
uint32_t wave_front_size;
uint32_t max_scratch_slots_per_cu;
@@ -1109,12 +1112,11 @@ struct amdgpu_cs_parser {
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
struct amdgpu_job {
- struct amd_sched_job base;
+ struct drm_sched_job base;
struct amdgpu_device *adev;
struct amdgpu_vm *vm;
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
- struct amdgpu_sync dep_sync;
struct amdgpu_sync sched_sync;
struct amdgpu_ib *ibs;
struct dma_fence *fence; /* the hw fence */
@@ -1123,7 +1125,7 @@ struct amdgpu_job {
void *owner;
uint64_t fence_ctx; /* the fence_context this job uses */
bool vm_needs_flush;
- unsigned vm_id;
+ unsigned vmid;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
@@ -1164,10 +1166,10 @@ struct amdgpu_wb {
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
-void amdgpu_get_pcie_info(struct amdgpu_device *adev);
+void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/*
* SDMA
@@ -1232,24 +1234,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
*/
void amdgpu_test_moves(struct amdgpu_device *adev);
-/*
- * Debugfs
- */
-struct amdgpu_debugfs {
- const struct drm_info_list *files;
- unsigned num_files;
-};
-
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles);
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
-
-#if defined(CONFIG_DEBUG_FS)
-int amdgpu_debugfs_init(struct drm_minor *minor);
-#endif
-
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
/*
* amdgpu smumgr functions
@@ -1404,8 +1388,6 @@ struct amdgpu_fw_vram_usage {
void *va;
};
-int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
-
/*
* CGS
*/
@@ -1421,6 +1403,87 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+
+/*
+ * amdgpu nbio functions
+ *
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+};
+
+
+/* Define the HW IP blocks that will be used in the driver; add more if necessary */
+enum amd_hw_ip_block_type {
+ GC_HWIP = 1,
+ HDP_HWIP,
+ SDMA0_HWIP,
+ SDMA1_HWIP,
+ MMHUB_HWIP,
+ ATHUB_HWIP,
+ NBIO_HWIP,
+ MP0_HWIP,
+ UVD_HWIP,
+ VCN_HWIP = UVD_HWIP,
+ VCE_HWIP,
+ DF_HWIP,
+ DCE_HWIP,
+ OSSSYS_HWIP,
+ SMUIO_HWIP,
+ PWR_HWIP,
+ NBIF_HWIP,
+ MAX_HWIP
+};
+
+#define HWIP_MAX_INSTANCE 6
+
+struct amd_powerplay {
+ struct cgs_device *cgs_device;
+ void *pp_handle;
+ const struct amd_ip_funcs *ip_funcs;
+ const struct amd_pm_funcs *pp_funcs;
+};
+
#define AMDGPU_RESET_MAGIC_NUM 64
struct amdgpu_device {
struct device *dev;
@@ -1606,6 +1669,11 @@ struct amdgpu_device {
/* amdkfd interface */
struct kfd_dev *kfd;
+ /* soc15 register offset based on ip, instance and segment */
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+ const struct amdgpu_nbio_funcs *nbio_funcs;
+
/* delayed work_func for deferring clockgating during resume */
struct delayed_work late_init_work;
@@ -1616,9 +1684,6 @@ struct amdgpu_device {
/* link all shadow bo */
struct list_head shadow_list;
struct mutex shadow_list_lock;
- /* link all gtt */
- spinlock_t gtt_list_lock;
- struct list_head gtt_list;
/* keep an lru list of rings by HW IP */
struct list_head ring_lru_list;
spinlock_t ring_lru_list_lock;
@@ -1629,7 +1694,8 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
- bool in_sriov_reset;
+ bool in_gpu_reset;
+ struct mutex lock_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1773,7 +1839,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
+#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
@@ -1784,7 +1850,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
+#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -1823,22 +1889,25 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
/* Common functions */
-int amdgpu_gpu_reset(struct amdgpu_device *adev);
-bool amdgpu_need_backup(struct amdgpu_device *adev);
-void amdgpu_pci_config_reset(struct amdgpu_device *adev);
-bool amdgpu_need_post(struct amdgpu_device *adev);
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job* job, bool force);
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+bool amdgpu_device_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc, u64 base);
+void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc);
+int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
@@ -1872,7 +1941,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int amdgpu_suspend(struct amdgpu_device *adev);
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
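Among the amdgpu.h additions, the reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE] table is what lets SOC15 code address registers by (IP block, instance, segment, offset) instead of hard-coded absolute values. A hedged sketch of the lookup, with made-up base addresses (the real driver fills the table in vega10_reg_init.c and hides the arithmetic behind macros):

#include <stdint.h>
#include <stdio.h>

enum hw_ip { GC_HWIP = 1, SDMA0_HWIP, MAX_HWIP };
#define HWIP_MAX_INSTANCE 6

/* reg_offset[ip][instance] points at an array of per-segment bases. */
struct soc {
	const uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
};

static uint32_t soc15_reg(struct soc *dev, enum hw_ip ip,
			  unsigned inst, unsigned seg, uint32_t offset)
{
	/* Absolute dword address = segment base + register offset. */
	return dev->reg_offset[ip][inst][seg] + offset;
}

int main(void)
{
	/* Hypothetical segment bases for one GC instance. */
	static const uint32_t gc_base[] = { 0x00001260, 0x0000A000 };
	struct soc dev = { .reg_offset = { [GC_HWIP] = { gc_base } } };

	/* e.g. register 0x10 in segment 1 of GC instance 0 */
	printf("0x%08x\n", soc15_reg(&dev, GC_HWIP, 0, 1, 0x10));
	return 0;
}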
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index c04f44a90392..a29362f9ef41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -277,7 +277,7 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
const struct amdgpu_ip_block *ip_block =
- amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
if (!ip_block)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5432af39a674..1d605e1c1d66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -85,7 +85,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
default:
- dev_info(adev->dev, "kfd not supported on this ASIC\n");
+ dev_dbg(adev->dev, "kfd not supported on this ASIC\n");
return;
}
@@ -93,6 +93,39 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
adev->pdev, kfd2kgd);
}
+/**
+ * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
+ * setup amdkfd
+ *
+ * @adev: amdgpu_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
+ *
+ * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
+ * takes doorbells required for its own rings and reports the setup to amdkfd.
+ * amdgpu reserved doorbells are at the start of the doorbell aperture.
+ */
+static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset)
+{
+ /*
+ * The first num_doorbells are used by amdgpu.
+ * amdkfd takes whatever's left in the aperture.
+ */
+ if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
+ *aperture_base = adev->doorbell.base;
+ *aperture_size = adev->doorbell.size;
+ *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
+ } else {
+ *aperture_base = 0;
+ *aperture_size = 0;
+ *start_offset = 0;
+ }
+}
+
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
@@ -242,14 +275,34 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
kfree(mem);
}
-uint64_t get_vmem_size(struct kgd_dev *kgd)
+void get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info)
{
- struct amdgpu_device *adev =
- (struct amdgpu_device *)kgd;
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
+ ~((1ULL << 32) - 1);
+ resource_size_t aper_limit = adev->mc.aper_base + adev->mc.aper_size;
+
+ memset(mem_info, 0, sizeof(*mem_info));
+ if (!(adev->mc.aper_base & address_mask || aper_limit & address_mask)) {
+ mem_info->local_mem_size_public = adev->mc.visible_vram_size;
+ mem_info->local_mem_size_private = adev->mc.real_vram_size -
+ adev->mc.visible_vram_size;
+ } else {
+ mem_info->local_mem_size_public = 0;
+ mem_info->local_mem_size_private = adev->mc.real_vram_size;
+ }
+ mem_info->vram_width = adev->mc.vram_width;
- BUG_ON(kgd == NULL);
+ pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
+ &adev->mc.aper_base, &aper_limit,
+ mem_info->local_mem_size_public,
+ mem_info->local_mem_size_private);
- return adev->mc.real_vram_size;
+ if (amdgpu_sriov_vf(adev))
+ mem_info->mem_clk_max = adev->clock.default_mclk / 100;
+ else
+ mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
}
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
@@ -265,6 +318,39 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- /* The sclk is in quantas of 10kHz */
- return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+ /* the sclk is in quanta of 10kHz */
+ if (amdgpu_sriov_vf(adev))
+ return adev->clock.default_sclk / 100;
+
+ return amdgpu_dpm_get_sclk(adev, false) / 100;
+}
+
+void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
+
+ memset(cu_info, 0, sizeof(*cu_info));
+ if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
+ return;
+
+ cu_info->cu_active_number = acu_info.number;
+ cu_info->cu_ao_mask = acu_info.ao_cu_mask;
+ memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
+ sizeof(acu_info.bitmap));
+ cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
+ cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
+ cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
+ cu_info->simd_per_cu = acu_info.simd_per_cu;
+ cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
+ cu_info->wave_front_size = acu_info.wave_front_size;
+ cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
+ cu_info->lds_size = acu_info.lds_size;
+}
+
+uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}
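get_local_mem_info splits VRAM into a public (CPU-visible) slice and a private one, but only if the visible aperture is reachable through the device's DMA mask; otherwise all of VRAM is reported as private. A small hedged model of that decision, with invented sizes (the real code reads them from adev->mc):

#include <stdint.h>
#include <stdio.h>

struct mem_info {
	uint64_t public_size;	/* CPU-visible VRAM */
	uint64_t private_size;	/* VRAM only the GPU can reach */
};

static void local_mem_info(uint64_t aper_base, uint64_t aper_size,
			   uint64_t visible, uint64_t real,
			   uint64_t dma_mask, struct mem_info *mi)
{
	uint64_t address_mask = ~dma_mask;
	uint64_t aper_limit = aper_base + aper_size;

	if (!((aper_base & address_mask) || (aper_limit & address_mask))) {
		/* Aperture is DMA-reachable: expose the visible part. */
		mi->public_size = visible;
		mi->private_size = real - visible;
	} else {
		mi->public_size = 0;
		mi->private_size = real;
	}
}

int main(void)
{
	struct mem_info mi;

	/* Hypothetical 8 GiB card with a 256 MiB visible aperture. */
	local_mem_info(0xe0000000ULL, 256ULL << 20, 256ULL << 20,
		       8ULL << 30, (1ULL << 40) - 1, &mi);
	printf("public %llu MiB, private %llu MiB\n",
	       (unsigned long long)(mi.public_size >> 20),
	       (unsigned long long)(mi.private_size >> 20));
	return 0;
}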
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8d689ab7e429..2a519f9062ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -56,10 +56,13 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr);
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-uint64_t get_vmem_size(struct kgd_dev *kgd);
+void get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
#define read_user_wptr(mmptr, wptr, dst) \
({ \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 1e3e9be7d77e..a9e6aea0e5f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -105,7 +105,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
@@ -166,17 +173,19 @@ static int get_tile_config(struct kgd_dev *kgd,
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
.free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
+ .get_local_mem_info = get_local_mem_info,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_vm_alloc_pasid,
- .free_pasid = amdgpu_vm_free_pasid,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
.hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
@@ -191,6 +200,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
+ .get_cu_info = get_cu_info,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -375,7 +386,44 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t i = 0, reg;
+#define HQD_N_REGS (35+4)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
+ DUMP_REG(reg);
+
+ release_queue(kgd);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
@@ -410,10 +458,17 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
+
+ if (read_user_wptr(mm, wptr, data))
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ else
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ m->sdma_rlc_rb_rptr);
+
WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
@@ -423,8 +478,37 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
m->sdma_rlc_rb_rptr_addr_lo);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- m->sdma_rlc_rb_cntl);
+
+ data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
+ queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+4)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
return 0;
}
@@ -575,7 +659,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
- int timeout = utimeout;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -588,10 +672,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
- if (timeout <= 0)
+ if (time_after(jiffies, end_jiffies))
return -ETIME;
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
@@ -599,6 +682,8 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+
return 0;
}
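Both the SDMA load and destroy paths above replace the old decrementing-millisecond-counter loop with an absolute jiffies deadline, which stays correct even when usleep_range() oversleeps. A kernel-style sketch of the pattern, with the hardware idle check left as a placeholder (jiffies, time_after(), msecs_to_jiffies() and usleep_range() are the real kernel interfaces):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Placeholder for the RREG32() status check done in the real code. */
static bool hw_is_idle(void)
{
	return false;
}

static int wait_for_idle(unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (true) {
		if (hw_is_idle())
			return 0;
		/* Absolute deadline: immune to oversleeping below. */
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
}

Counting down by the nominal sleep length, as the removed code did, under-accounts whenever the scheduler sleeps longer than requested; comparing against a precomputed deadline cannot drift.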
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 056929b8ccd0..b127259d7d85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -45,7 +45,7 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-struct cik_sdma_rlc_registers;
+struct vi_sdma_mqd;
/*
* Register access functions
@@ -64,7 +64,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
@@ -125,17 +132,19 @@ static int get_tile_config(struct kgd_dev *kgd,
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
.free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
+ .get_local_mem_info = get_local_mem_info,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_vm_alloc_pasid,
- .free_pasid = amdgpu_vm_free_pasid,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
.hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
@@ -152,6 +161,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
+ .get_cu_info = get_cu_info,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -268,9 +279,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
{
- return 0;
+ uint32_t retval;
+
+ retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+ m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
+ pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+ return retval;
}
static inline struct vi_mqd *get_mqd(void *mqd)
@@ -278,9 +295,9 @@ static inline struct vi_mqd *get_mqd(void *mqd)
return (struct vi_mqd *)mqd;
}
-static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
- return (struct cik_sdma_rlc_registers *)mqd;
+ return (struct vi_sdma_mqd *)mqd;
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
@@ -358,8 +375,138 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t i = 0, reg;
+#define HQD_N_REGS (54+4)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
+ DUMP_REG(reg);
+
+ release_queue(kgd);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct vi_sdma_mqd *m;
+ unsigned long end_jiffies;
+ uint32_t sdma_base_addr;
+ uint32_t data;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+ usleep_range(500, 1000);
+ }
+ if (m->sdma_engine_id) {
+ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+ } else {
+ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+ }
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+
+ if (read_user_wptr(mm, wptr, data))
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ else
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdmax_rlcx_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
+ queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+4+2+3+7)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
return 0;
}
@@ -388,7 +535,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct cik_sdma_rlc_registers *m;
+ struct vi_sdma_mqd *m;
uint32_t sdma_base_addr;
uint32_t sdma_rlc_rb_cntl;
@@ -509,10 +656,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct cik_sdma_rlc_registers *m;
+ struct vi_sdma_mqd *m;
uint32_t sdma_base_addr;
uint32_t temp;
- int timeout = utimeout;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -523,18 +670,19 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (timeout <= 0)
+ if (time_after(jiffies, end_jiffies))
return -ETIME;
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
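The new hqd_dump/hqd_sdma_dump callbacks hand back one allocation of (address, value) pairs through a uint32_t (**dump)[2] out-parameter, counting entries as they go. A standalone hedged illustration of that pointer-to-array idiom, with register reads replaced by a dummy function:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define N_REGS 4

/* Stand-in for RREG32(): return a fake register value. */
static uint32_t rreg32(uint32_t addr) { return addr ^ 0xdeadbeef; }

static int dump_regs(uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t i = 0, reg;

	/* One allocation holds N_REGS pairs of {byte address, value}. */
	*dump = malloc(N_REGS * sizeof(**dump));
	if (*dump == NULL)
		return -1;

	for (reg = 0x100; reg < 0x100 + N_REGS; reg++) {
		(*dump)[i][0] = reg << 2;	/* dword index -> byte offset */
		(*dump)[i++][1] = rreg32(reg);
	}
	*n_regs = i;
	return 0;
}

int main(void)
{
	uint32_t (*dump)[2];
	uint32_t n, i;

	if (dump_regs(&dump, &n))
		return 1;
	for (i = 0; i < n; i++)
		printf("0x%04x = 0x%08x\n", dump[i][0], dump[i][1]);
	free(dump);
	return 0;
}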
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index f450b69323fa..bf872f694f50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -27,6 +27,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
+#include "amdgpu_atomfirmware.h"
#include "amdgpu_i2c.h"
#include "atom.h"
@@ -690,12 +691,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
/* set a reasonable default for DP */
if (adev->clock.default_dispclk < 53900) {
- DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
- adev->clock.default_dispclk / 100);
+ DRM_DEBUG("Changing default dispclk from %dMhz to 600Mhz\n",
+ adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
} else if (adev->clock.default_dispclk <= 60000) {
- DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
- adev->clock.default_dispclk / 100);
+ DRM_DEBUG("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
@@ -1699,7 +1700,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
+static void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
uint32_t bios_2_scratch, bios_6_scratch;
@@ -1721,28 +1722,6 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
-}
-
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
-{
- int i;
-
- /*
- * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
- * execute ASIC_Init posting via driver
- */
- adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
-}
-
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung)
{
@@ -1798,7 +1777,7 @@ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
#endif
}
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
+static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
@@ -1841,3 +1820,234 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios. The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes them to program specific
+ * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
+ * atombios.h, and atom.c
+ */
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+
+ WREG32(reg, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32(reg);
+ return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+
+ WREG32_IO(reg, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32_IO(reg);
+ return r;
+}
+
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+ NULL);
+
+/**
+ * amdgpu_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void amdgpu_atombios_fini(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.atom_context) {
+ kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
+ kfree(adev->mode_info.atom_context);
+ adev->mode_info.atom_context = NULL;
+ kfree(adev->mode_info.atom_card_info);
+ adev->mode_info.atom_card_info = NULL;
+ device_remove_file(adev->dev, &dev_attr_vbios_version);
+}
+
+/**
+ * amdgpu_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int amdgpu_atombios_init(struct amdgpu_device *adev)
+{
+ struct card_info *atom_card_info =
+ kzalloc(sizeof(struct card_info), GFP_KERNEL);
+ int ret;
+
+ if (!atom_card_info)
+ return -ENOMEM;
+
+ adev->mode_info.atom_card_info = atom_card_info;
+ atom_card_info->dev = adev->ddev;
+ atom_card_info->reg_read = cail_reg_read;
+ atom_card_info->reg_write = cail_reg_write;
+ /* needed for iio ops */
+ if (adev->rio_mem) {
+ atom_card_info->ioreg_read = cail_ioreg_read;
+ atom_card_info->ioreg_write = cail_ioreg_write;
+ } else {
+ DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
+ atom_card_info->ioreg_read = cail_reg_read;
+ atom_card_info->ioreg_write = cail_reg_write;
+ }
+ atom_card_info->mc_read = cail_mc_read;
+ atom_card_info->mc_write = cail_mc_write;
+ atom_card_info->pll_read = cail_pll_read;
+ atom_card_info->pll_write = cail_pll_write;
+
+ adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
+ if (!adev->mode_info.atom_context) {
+ amdgpu_atombios_fini(adev);
+ return -ENOMEM;
+ }
+
+ mutex_init(&adev->mode_info.atom_context->mutex);
+ if (adev->is_atom_fw) {
+ amdgpu_atomfirmware_scratch_regs_init(adev);
+ amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ } else {
+ amdgpu_atombios_scratch_regs_init(adev);
+ amdgpu_atombios_allocate_fb_scratch(adev);
+ }
+
+ ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+ if (ret) {
+ DRM_ERROR("Failed to create device file for VBIOS version\n");
+ return ret;
+ }
+
+ return 0;
+}
+
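amdgpu_atombios_init registers the CAIL accessors as a table of function pointers that the ATOM interpreter calls instead of touching hardware directly, falling back to the MMIO pair when no PCI I/O BAR exists. A hedged userspace sketch of that callback-table setup (the struct here is a simplification of card_info):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct card_info: the accessor callbacks
 * the ATOM interpreter uses for all register traffic. */
struct card_info {
	uint32_t (*reg_read)(uint32_t reg);
	void (*reg_write)(uint32_t reg, uint32_t val);
	uint32_t (*ioreg_read)(uint32_t reg);
	void (*ioreg_write)(uint32_t reg, uint32_t val);
};

static uint32_t mmio_read(uint32_t reg)
{
	printf("MMIO read  0x%x\n", reg);
	return 0;
}

static void mmio_write(uint32_t reg, uint32_t val)
{
	printf("MMIO write 0x%x = 0x%x\n", reg, val);
}

static uint32_t pio_read(uint32_t reg)
{
	printf("PIO read   0x%x\n", reg);
	return 0;
}

static void pio_write(uint32_t reg, uint32_t val)
{
	printf("PIO write  0x%x = 0x%x\n", reg, val);
}

static void card_info_init(struct card_info *ci, int have_pio_bar)
{
	ci->reg_read = mmio_read;
	ci->reg_write = mmio_write;
	if (have_pio_bar) {
		ci->ioreg_read = pio_read;
		ci->ioreg_write = pio_write;
	} else {
		/* No I/O BAR: route I/O accesses through MMIO, as
		 * amdgpu_atombios_init does when adev->rio_mem is NULL. */
		ci->ioreg_read = mmio_read;
		ci->ioreg_write = mmio_write;
	}
}

int main(void)
{
	struct card_info ci;

	card_info_init(&ci, 0);
	ci.ioreg_write(0x10, 1);	/* lands in the MMIO fallback */
	return 0;
}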
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index b0d5d1d7fdba..fd8f18074f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -195,9 +195,6 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
@@ -219,6 +216,7 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id);
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev);
+void amdgpu_atombios_fini(struct amdgpu_device *adev);
+int amdgpu_atombios_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index c13c51af0b68..e2c3c5ec42d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -14,6 +14,16 @@
#include "amd_acpi.h"
+#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0)
+
+struct amdgpu_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
struct amdgpu_atpx_functions {
bool px_params;
bool power_cntl;
@@ -35,6 +45,7 @@ struct amdgpu_atpx {
static struct amdgpu_atpx_priv {
bool atpx_detected;
bool bridge_pm_usable;
+ unsigned int quirks;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
@@ -205,13 +216,19 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
- printk("ATPX Hybrid Graphics\n");
- /*
- * Disable legacy PM methods only when pcie port PM is usable,
- * otherwise the device might fail to power off or power on.
- */
- atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
- atpx->is_hybrid = true;
+ if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
+ printk("ATPX Hybrid Graphics, forcing to ATPX\n");
+ atpx->functions.power_cntl = true;
+ atpx->is_hybrid = false;
+ } else {
+ printk("ATPX Hybrid Graphics\n");
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
}
atpx->dgpu_req_power_for_displays = false;
@@ -547,6 +564,30 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
.get_client_id = amdgpu_atpx_get_client_id,
};
+static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ /* HG _PR3 doesn't seem to work on this A+A weston board */
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+};
+
+static void amdgpu_atpx_get_quirks(struct pci_dev *pdev)
+{
+ const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device) {
+ amdgpu_atpx_priv.quirks |= p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+}
+
/**
* amdgpu_atpx_detect - detect whether we have PX
*
@@ -570,6 +611,7 @@ static bool amdgpu_atpx_detect(void)
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
@@ -579,6 +621,7 @@ static bool amdgpu_atpx_detect(void)
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
if (has_atpx && vga_count == 2) {
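The PX quirk handling walks a zero-terminated table of PCI IDs and ORs in the matching entry's flags; AMDGPU_PX_QUIRK_FORCE_ATPX then makes amdgpu_atpx_validate use ATPX power control even on hybrid-graphics boards. A hedged standalone version of the table walk:

#include <stdint.h>
#include <stdio.h>

#define PX_QUIRK_FORCE_ATPX (1 << 0)

struct px_quirk {
	uint16_t vendor, device, sub_vendor, sub_device;
	uint32_t flags;
};

/* Zero-terminated table, same shape as amdgpu_px_quirk_list. */
static const struct px_quirk quirk_list[] = {
	{ 0x1002, 0x6900, 0x1002, 0x0124, PX_QUIRK_FORCE_ATPX },
	{ 0x1002, 0x6900, 0x1028, 0x0812, PX_QUIRK_FORCE_ATPX },
	{ 0, 0, 0, 0, 0 },
};

static uint32_t get_quirks(uint16_t vendor, uint16_t device,
			   uint16_t sub_vendor, uint16_t sub_device)
{
	const struct px_quirk *p = quirk_list;
	uint32_t quirks = 0;

	while (p->device != 0) {
		if (vendor == p->vendor && device == p->device &&
		    sub_vendor == p->sub_vendor &&
		    sub_device == p->sub_device) {
			quirks |= p->flags;
			break;
		}
		++p;
	}
	return quirks;
}

int main(void)
{
	if (get_quirks(0x1002, 0x6900, 0x1028, 0x0812) & PX_QUIRK_FORCE_ATPX)
		printf("forcing ATPX power control\n");
	return 0;
}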
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 057e1ecd83ce..a5df80d50d44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -93,7 +93,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
resource_size_t size = 256 * 1024; /* ??? */
if (!(adev->flags & AMD_IS_APU))
- if (amdgpu_need_post(adev))
+ if (amdgpu_device_need_post(adev))
return false;
adev->bios = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f2b72c7c6857..4466f3535e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -801,6 +801,11 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else
strcpy(fw_name, "amdgpu/vega10_smc.bin");
break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_RAVEN:
+ adev->pm.fw_version = info->version;
+ return 0;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
@@ -948,7 +953,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
(amdgpu_crtc->v_border * 2);
mode_info->vblank_time_us = vblank_lines * line_time_us;
mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- mode_info->ref_clock = adev->clock.spll.reference_freq;
mode_info = NULL;
}
}
@@ -958,7 +962,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
if (mode_info != NULL) {
mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
- mode_info->ref_clock = adev->clock.spll.reference_freq;
}
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index df9cbc78e168..8ca3783f2deb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -358,7 +358,6 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
if (amdgpu_connector->edid) {
drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
- drm_edid_to_eld(connector, amdgpu_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(connector, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 57abf7abd7a9..e80fc38141b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -90,6 +90,12 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk;
}
+ /* skip guilty context job */
+ if (atomic_read(&p->ctx->guilty) == 1) {
+ ret = -ECANCELED;
+ goto free_chunk;
+ }
+
mutex_lock(&p->ctx->lock);
/* get chunks */
@@ -337,7 +343,12 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved, bytes_moved;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .allow_reserved_eviction = false,
+ .resv = bo->tbo.resv
+ };
uint32_t domain;
int r;
@@ -367,15 +378,13 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- bytes_moved = atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
- p->bytes_moved += bytes_moved;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+ p->bytes_moved += ctx.bytes_moved;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
- p->bytes_moved_vis += bytes_moved;
+ p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
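Throughout this series ttm_bo_validate() trades its interruptible/no_wait_gpu bool pair for a struct ttm_operation_ctx, which also hands the bytes-moved statistic back to the caller instead of requiring it to sample adev->num_bytes_moved around the call. A minimal sketch of the new calling convention, assuming the 4.16-era TTM API:

    struct ttm_operation_ctx ctx = {
        .interruptible = true,    /* was the first bool argument */
        .no_wait_gpu = false,     /* was the second bool argument */
    };
    int r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    if (!r)
        p->bytes_moved += ctx.bytes_moved;    /* filled in by TTM */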
@@ -390,6 +399,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo *validated)
{
uint32_t domain = validated->allowed_domains;
+ struct ttm_operation_ctx ctx = { true, false };
int r;
if (!p->evictable)
@@ -431,7 +441,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
p->bytes_moved += bytes_moved;
@@ -470,6 +480,7 @@ static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_bo_list_entry *lobj;
int r;
@@ -487,8 +498,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
lobj->user_pages) {
amdgpu_ttm_placement_from_domain(bo,
AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
- false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
@@ -678,7 +688,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (!r && p->uf_entry.robj) {
struct amdgpu_bo *uf = p->uf_entry.robj;
- r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
+ r = amdgpu_ttm_alloc_gart(&uf->tbo);
p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
}
@@ -768,10 +778,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
struct amdgpu_bo *bo;
int i, r;
- r = amdgpu_vm_update_directories(adev, vm);
- if (r)
- return r;
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -781,7 +787,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
r = amdgpu_sync_fence(adev, &p->job->sync,
- fpriv->prt_va->last_pt_update);
+ fpriv->prt_va->last_pt_update, false);
if (r)
return r;
@@ -795,7 +801,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f);
+ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
if (r)
return r;
}
@@ -818,7 +824,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f);
+ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
if (r)
return r;
}
@@ -829,7 +835,11 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+ r = amdgpu_vm_update_directories(adev, vm);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
if (r)
return r;
@@ -865,8 +875,8 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *m;
struct amdgpu_bo *aobj = NULL;
struct amdgpu_cs_chunk *chunk;
+ uint64_t offset, va_start;
struct amdgpu_ib *ib;
- uint64_t offset;
uint8_t *kptr;
chunk = &p->chunks[i];
@@ -876,14 +886,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;
- r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
- &aobj, &m);
+ va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
DRM_ERROR("IB va_start is invalid\n");
return r;
}
- if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+ if ((va_start + chunk_ib->ib_bytes) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
@@ -896,7 +906,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
}
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += chunk_ib->va_start - offset;
+ kptr += va_start - offset;
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj);
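Masking with AMDGPU_VA_HOLE_MASK strips the sign-extended upper bits of a user VA, since the 48-bit GPUVM address space now has a canonical-address style hole in the middle. A sketch of the idea; the mask value is assumed from the 4.16 headers rather than shown in this hunk:

    /* only the low 48 bits of a user VA are meaningful (assumed value) */
    #define AMDGPU_VA_HOLE_MASK 0x0000ffffffffffffULL
    uint64_t va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;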
@@ -1033,8 +1043,8 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
amdgpu_ctx_put(ctx);
return r;
} else if (fence) {
- r = amdgpu_sync_fence(p->adev, &p->job->sync,
- fence);
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
+ true);
dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
@@ -1053,7 +1063,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
if (r)
return r;
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
dma_fence_put(fence);
return r;
@@ -1145,7 +1155,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
- struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
unsigned i;
uint64_t seq;
@@ -1168,7 +1178,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@@ -1194,11 +1204,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring,
- amd_sched_get_job_priority(&job->base));
+ amdgpu_ring_priority_get(job->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
- amd_sched_entity_push_job(&job->base);
+ drm_sched_entity_push_job(&job->base, entity);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
@@ -1570,6 +1579,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
struct amdgpu_bo_va_mapping **map)
{
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
int r;
@@ -1590,11 +1600,10 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
- false);
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
}
- return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
+ return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c184468e2b2b..09d35051fdd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -28,10 +28,10 @@
#include "amdgpu_sched.h"
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
/* NORMAL and below are accessible by everyone */
- if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+ if (priority <= DRM_SCHED_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
@@ -44,14 +44,14 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
- enum amd_sched_priority priority,
+ enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
- if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
return -EINVAL;
r = amdgpu_ctx_priority_permit(filp, priority);
@@ -75,22 +75,23 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
}
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
- ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
+ ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring)
continue;
- r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
- rq, amdgpu_sched_jobs);
+ r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
+ rq, amdgpu_sched_jobs, &ctx->guilty);
if (r)
goto failed;
}
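The amd_sched_* entry points become drm_sched_* as the scheduler moves into the DRM core, and entity init gains an atomic_t *guilty pointer so the scheduler can flag the context that caused a hang. The signature, as assumed from the 4.16 gpu_scheduler.h:

    int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
                              struct drm_sched_entity *entity,
                              struct drm_sched_rq *rq,
                              uint32_t jobs, atomic_t *guilty);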
@@ -103,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
+ drm_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -125,7 +126,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
ctx->fences = NULL;
for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(&adev->rings[i]->sched,
+ drm_sched_entity_fini(&adev->rings[i]->sched,
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
@@ -136,7 +137,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
- enum amd_sched_priority priority,
+ enum drm_sched_priority priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -216,11 +217,45 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
/* determine if a GPU reset has occurred since the last call */
reset_counter = atomic_read(&adev->gpu_reset_counter);
/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
- if (ctx->reset_counter == reset_counter)
+ if (ctx->reset_counter_query == reset_counter)
out->state.reset_status = AMDGPU_CTX_NO_RESET;
else
out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
- ctx->reset_counter = reset_counter;
+ ctx->reset_counter_query = reset_counter;
+
+ mutex_unlock(&mgr->lock);
+ return 0;
+}
+
+static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv, uint32_t id,
+ union drm_amdgpu_ctx_out *out)
+{
+ struct amdgpu_ctx *ctx;
+ struct amdgpu_ctx_mgr *mgr;
+
+ if (!fpriv)
+ return -EINVAL;
+
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ ctx = idr_find(&mgr->ctx_handles, id);
+ if (!ctx) {
+ mutex_unlock(&mgr->lock);
+ return -EINVAL;
+ }
+
+ out->state.flags = 0x0;
+ out->state.hangs = 0x0;
+
+ if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;
+
+ if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;
+
+ if (atomic_read(&ctx->guilty))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
mutex_unlock(&mgr->lock);
return 0;
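Unlike the original query, QUERY_STATE2 is stateless (reset_counter_query is not advanced) and reports per-context guilt. A hypothetical libdrm consumer, assuming drmCommandWriteRead() from xf86drm.h and the uapi flag names added above:

    union drm_amdgpu_ctx args = {0};

    args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
    args.in.ctx_id = ctx_id;
    if (!drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args)) &&
        (args.out.state.flags & AMDGPU_CTX_QUERY2_FLAGS_GUILTY))
        recreate_context();    /* this context caused the last reset */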
@@ -231,7 +266,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
- enum amd_sched_priority priority;
+ enum drm_sched_priority priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -243,8 +278,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
- if (priority == AMD_SCHED_PRIORITY_INVALID)
- priority = AMD_SCHED_PRIORITY_NORMAL;
+ if (priority == DRM_SCHED_PRIORITY_INVALID)
+ priority = DRM_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -257,6 +292,9 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
case AMDGPU_CTX_OP_QUERY_STATE:
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
break;
+ case AMDGPU_CTX_OP_QUERY_STATE2:
+ r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
+ break;
default:
return -EINVAL;
}
@@ -347,18 +385,18 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
int i;
struct amdgpu_device *adev = ctx->adev;
- struct amd_sched_rq *rq;
- struct amd_sched_entity *entity;
+ struct drm_sched_rq *rq;
+ struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
- enum amd_sched_priority ctx_prio;
+ enum drm_sched_priority ctx_prio;
ctx->override_priority = priority;
- ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+ ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < adev->num_rings; i++) {
@@ -369,7 +407,7 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
continue;
- amd_sched_entity_set_rq(entity, rq);
+ drm_sched_entity_set_rq(entity, rq);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
new file mode 100644
index 000000000000..ee76b468774a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -0,0 +1,792 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <drm/drmP.h>
+#include <linux/debugfs.h>
+#include "amdgpu.h"
+
+/*
+ * Debugfs
+ */
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles)
+{
+ unsigned i;
+
+ for (i = 0; i < adev->debugfs_count; i++) {
+ if (adev->debugfs[i].files == files) {
+ /* Already registered */
+ return 0;
+ }
+ }
+
+ i = adev->debugfs_count + 1;
+ if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
+ DRM_ERROR("Reached maximum number of debugfs components.\n");
+ DRM_ERROR("Report so we increase "
+ "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
+ return -EINVAL;
+ }
+ adev->debugfs[adev->debugfs_count].files = files;
+ adev->debugfs[adev->debugfs_count].num_files = nfiles;
+ adev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_create_files(files, nfiles,
+ adev->ddev->primary->debugfs_root,
+ adev->ddev->primary);
+#endif
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (*pos > adev->rmmio_size)
+ goto end;
+
+ value = RREG32(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto end;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ return result;
+}
+
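The file offset doubles as a command word for this register file: bit 62 selects explicit SE/SH/instance banking (bits 24-33, 34-43 and 44-53, with 0x3FF meaning broadcast), bit 23 takes the power-gating lock, and the low 22 bits are the register byte offset. A hypothetical userspace read of one register on SE 1:

    uint64_t pos = (1ULL << 62)        /* use explicit bank selection */
                 | (1ULL << 24)        /* se_bank = 1 */
                 | (0x3FFULL << 34)    /* sh_bank: broadcast */
                 | (0x3FFULL << 44)    /* instance: broadcast */
                 | reg_byte_offset;    /* low 22 bits */
    uint32_t value;
    pread(fd, &value, 4, pos);         /* fd: the amdgpu_regs file */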
+static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ /* are we writing registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (*pos > adev->rmmio_size)
+ goto end;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto end;
+ }
+
+ WREG32(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_PCIE(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_PCIE(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_DIDT(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_DIDT(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_SMC(*pos);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_SMC(*pos, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 3;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ /* rev==1 */
+ config[no_regs++] = adev->rev_id;
+ config[no_regs++] = adev->pg_flags;
+ config[no_regs++] = adev->cg_flags;
+
+ /* rev==2 */
+ config[no_regs++] = adev->family;
+ config[no_regs++] = adev->external_rev_id;
+
+ /* rev==3 */
+ config[no_regs++] = adev->pdev->device;
+ config[no_regs++] = adev->pdev->revision;
+ config[no_regs++] = adev->pdev->subsystem_device;
+ config[no_regs++] = adev->pdev->subsystem_vendor;
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
+
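The config blob is versioned by its first dword so userspace can sniff the layout before trusting later fields. A minimal consumer sketch (the debugfs path is an assumption, not fixed by this patch):

    uint32_t ver;
    int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);

    if (fd >= 0 && read(fd, &ver, sizeof(ver)) == sizeof(ver) && ver >= 3)
        ;    /* rev 3 appends the PCI device/revision/subsystem ids */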
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int idx, x, outsize, r, valuesize;
+ uint32_t values[16];
+
+ if (size & 3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (amdgpu_dpm == 0)
+ return -EINVAL;
+
+ /* convert offset to sensor number */
+ idx = *pos >> 2;
+
+ valuesize = sizeof(values);
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+ else
+ return -EINVAL;
+
+ if (size > valuesize)
+ return -EINVAL;
+
+ outsize = 0;
+ x = 0;
+ if (!r) {
+ while (size) {
+ r = put_user(values[x++], (int32_t *)buf);
+ buf += 4;
+ size -= 4;
+ outsize += 4;
+ }
+ }
+
+ return !r ? outsize : r;
+}
+
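Each sensor is addressed by its enum value times four, i.e. the file offset is the sensor index shifted left by two. A hypothetical read, assuming AMDGPU_PP_SENSOR_GPU_TEMP from the powerplay headers:

    uint32_t temp;
    pread(fd, &temp, sizeof(temp),    /* fd: the amdgpu_sensors file */
          (off_t)AMDGPU_PP_SENSOR_GPU_TEMP << 2);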
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
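As with the register file, the offset encodes the wave selector: bits 0-6 are the byte offset into the returned dword block, then SE, SH, CU, wave and SIMD follow in the fields decoded above. A sketch of packing a selector in userspace:

    uint64_t pos = (offset & 0x7F)
                 | ((uint64_t)se   << 7)
                 | ((uint64_t)sh   << 15)
                 | ((uint64_t)cu   << 23)
                 | ((uint64_t)wave << 31)
                 | ((uint64_t)simd << 37);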
+static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
+
+ data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ if (bank == 0) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+ adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+ adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ while (size) {
+ uint32_t value;
+
+ value = data[offset++];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto err;
+ }
+
+ result += 4;
+ buf += 4;
+ size -= 4;
+ }
+
+err:
+ kfree(data);
+ return result;
+}
+
+static const struct file_operations amdgpu_debugfs_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_read,
+ .write = amdgpu_debugfs_regs_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_didt_read,
+ .write = amdgpu_debugfs_regs_didt_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_pcie_read,
+ .write = amdgpu_debugfs_regs_pcie_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_smc_read,
+ .write = amdgpu_debugfs_regs_smc_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_sensor_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_gpr_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gpr_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations *debugfs_regs[] = {
+ &amdgpu_debugfs_regs_fops,
+ &amdgpu_debugfs_regs_didt_fops,
+ &amdgpu_debugfs_regs_pcie_fops,
+ &amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
+ &amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
+ &amdgpu_debugfs_gpr_fops,
+};
+
+static const char *debugfs_regs_names[] = {
+ "amdgpu_regs",
+ "amdgpu_regs_didt",
+ "amdgpu_regs_pcie",
+ "amdgpu_regs_smc",
+ "amdgpu_gca_config",
+ "amdgpu_sensors",
+ "amdgpu_wave",
+ "amdgpu_gpr",
+};
+
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ unsigned i, j;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ ent = debugfs_create_file(debugfs_regs_names[i],
+ S_IFREG | S_IRUGO, root,
+ adev, debugfs_regs[i]);
+ if (IS_ERR(ent)) {
+ for (j = 0; j < i; j++) {
+ debugfs_remove(adev->debugfs_regs[j]);
+ adev->debugfs_regs[j] = NULL;
+ }
+ return PTR_ERR(ent);
+ }
+
+ if (!i)
+ i_size_write(ent->d_inode, adev->rmmio_size);
+ adev->debugfs_regs[i] = ent;
+ }
+
+ return 0;
+}
+
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ if (adev->debugfs_regs[i]) {
+ debugfs_remove(adev->debugfs_regs[i]);
+ adev->debugfs_regs[i] = NULL;
+ }
+ }
+}
+
+static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0, i;
+
+ /* hold on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+ kthread_park(ring->sched.thread);
+ }
+
+ seq_printf(m, "run ib test:\n");
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ seq_printf(m, "ib ring tests failed (%d).\n", r);
+ else
+ seq_printf(m, "ib ring tests passed.\n");
+
+ /* go on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+ kthread_unpark(ring->sched.thread);
+ }
+
+ return 0;
+}
+
+static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_write(m, adev->bios, adev->bios_size);
+ return 0;
+}
+
+static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_debugfs_list[] = {
+ {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
+ {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
+ {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram}
+};
+
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
+ ARRAY_SIZE(amdgpu_debugfs_list));
+}
+
+#else
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 147822545252..8260d8073c26 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -1,5 +1,7 @@
/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,57 +21,22 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#include "dm_services.h"
-#include "include/grph_object_id.h"
-
-static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
-{
- bool rc = true;
-
- switch (id.type) {
- case OBJECT_TYPE_UNKNOWN:
- rc = false;
- break;
- case OBJECT_TYPE_GPU:
- case OBJECT_TYPE_ENGINE:
- /* do NOT check for id.id == 0 */
- if (id.enum_id == ENUM_ID_UNKNOWN)
- rc = false;
- break;
- default:
- if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
- rc = false;
- break;
- }
-
- return rc;
-}
-
-bool dal_graphics_object_id_is_equal(
- struct graphics_object_id id1,
- struct graphics_object_id id2)
-{
- if (false == dal_graphics_object_id_is_valid(id1)) {
- dm_output_to_console(
- "%s: Warning: comparing invalid object 'id1'!\n", __func__);
- return false;
- }
-
- if (false == dal_graphics_object_id_is_valid(id2)) {
- dm_output_to_console(
- "%s: Warning: comparing invalid object 'id2'!\n", __func__);
- return false;
- }
-
- if (id1.id == id2.id && id1.enum_id == id2.enum_id
- && id1.type == id2.type)
- return true;
-
- return false;
-}
-
-
+/*
+ * Debugfs
+ */
+struct amdgpu_debugfs {
+ const struct drm_info_list *files;
+ unsigned num_files;
+};
+
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
+int amdgpu_debugfs_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles);
+int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3573ecdb06ee..00a50cc5ec9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -28,7 +28,6 @@
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
-#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -64,11 +63,6 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
-
static const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
@@ -333,7 +327,7 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
BUG();
}
-static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
+static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
@@ -342,13 +336,13 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
(void **)&adev->vram_scratch.ptr);
}
-static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
+static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
- * amdgpu_program_register_sequence - program an array of registers.
+ * amdgpu_device_program_register_sequence - program an array of registers.
*
* @adev: amdgpu_device pointer
* @registers: pointer to the register array
@@ -357,9 +351,9 @@ static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
* Programs an array of registers with AND and OR masks.
* This is a helper for setting golden registers.
*/
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
- const u32 *registers,
- const u32 array_size)
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ const u32 *registers,
+ const u32 array_size)
{
u32 tmp, reg, and_mask, or_mask;
int i;
@@ -383,7 +377,7 @@ void amdgpu_program_register_sequence(struct amdgpu_device *adev,
}
}
-void amdgpu_pci_config_reset(struct amdgpu_device *adev)
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
@@ -392,14 +386,14 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
* GPU doorbell aperture helpers function.
*/
/**
- * amdgpu_doorbell_init - Init doorbell driver information.
+ * amdgpu_device_doorbell_init - Init doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
-static int amdgpu_doorbell_init(struct amdgpu_device *adev)
+static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
@@ -410,6 +404,9 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
return 0;
}
+ if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
+ return -EINVAL;
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -429,66 +426,35 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Tear down doorbell driver information (CIK)
*/
-static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
+static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
iounmap(adev->doorbell.ptr);
adev->doorbell.ptr = NULL;
}
-/**
- * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup amdkfd
- *
- * @adev: amdgpu_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
- *
- * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
- * takes doorbells required for its own rings and reports the setup to amdkfd.
- * amdgpu reserved doorbells are at the start of the doorbell aperture.
- */
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
-{
- /*
- * The first num_doorbells are used by amdgpu.
- * amdkfd takes whatever's left in the aperture.
- */
- if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = adev->doorbell.base;
- *aperture_size = adev->doorbell.size;
- *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
-}
+
/*
- * amdgpu_wb_*()
+ * amdgpu_device_wb_*()
* Writeback is the method by which the GPU updates special pages in memory
* with the status of certain GPU events (fences, ring pointers, etc.).
*/
/**
- * amdgpu_wb_fini - Disable Writeback and free memory
+ * amdgpu_device_wb_fini - Disable Writeback and free memory
*
* @adev: amdgpu_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
-static void amdgpu_wb_fini(struct amdgpu_device *adev)
+static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
amdgpu_bo_free_kernel(&adev->wb.wb_obj,
@@ -499,7 +465,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_wb_init- Init Writeback driver info and allocate memory
+ * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
*
* @adev: amdgpu_device pointer
*
@@ -507,7 +473,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
* Used at driver startup.
* Returns 0 on success or an -error on failure.
*/
-static int amdgpu_wb_init(struct amdgpu_device *adev)
+static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
int r;
@@ -533,7 +499,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_wb_get - Allocate a wb entry
+ * amdgpu_device_wb_get - Allocate a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
@@ -541,7 +507,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
* Allocate a wb slot for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
@@ -555,61 +521,34 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
}
/**
- * amdgpu_wb_free - Free a wb entry
+ * amdgpu_device_wb_free - Free a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
*
* Free a wb slot allocated for use by the driver (all asics)
*/
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
if (wb < adev->wb.num_wb)
__clear_bit(wb >> 3, adev->wb.used);
}
/**
- * amdgpu_vram_location - try to find VRAM location
+ * amdgpu_device_vram_location - try to find VRAM location
* @adev: amdgpu device structure holding all necessary information
* @mc: memory controller structure holding memory information
* @base: base address at which to put VRAM
*
* Function will try to place VRAM at base address provided
- * as parameter (which is so far either PCI aperture address or
- * for IGP TOM base address).
- *
- * If there is not enough space to fit the unvisible VRAM in the 32bits
- * address space then we limit the VRAM size to the aperture.
- *
- * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
- * this shouldn't be a problem as we are using the PCI aperture as a reference.
- * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
- * not IGP.
- *
- * Note: we use mc_vram_size as on some board we need to program the mc to
- * cover the whole aperture even if VRAM size is inferior to aperture size
- * Novell bug 204882 + along with lots of ubuntu ones
- *
- * Note: when limiting vram it's safe to overwritte real_vram_size because
- * we are not in case where real_vram_size is inferior to mc_vram_size (ie
- * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
- * ones)
- *
- * Note: IGP TOM addr should be the same as the aperture addr, we don't
- * explicitly check for that though.
- *
- * FIXME: when reducing VRAM size align new size on power of 2.
+ * as parameter.
*/
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
+void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc, u64 base)
{
uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
mc->vram_start = base;
- if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
- dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
- mc->real_vram_size = mc->aper_size;
- mc->mc_vram_size = mc->aper_size;
- }
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (limit && limit < mc->real_vram_size)
mc->real_vram_size = limit;
@@ -619,7 +558,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
}
/**
- * amdgpu_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GTT location
* @adev: amdgpu device structure holding all necessary information
* @mc: memory controller structure holding memory information
*
@@ -630,7 +569,8 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
*
* FIXME: when reducing GTT size align new size on power of 2.
*/
-void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc)
{
u64 size_af, size_bf;
@@ -647,93 +587,91 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
dev_warn(adev->dev, "limiting GTT\n");
mc->gart_size = size_af;
}
- mc->gart_start = mc->vram_end + 1;
+ /* VCE doesn't like it when BOs cross a 4GB segment, so align
+ * the GART base on a 4GB boundary as well.
+ */
+ mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
}
mc->gart_end = mc->gart_start + mc->gart_size - 1;
dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
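With the new alignment, a board whose VRAM ends below 4GB no longer gets GART placed immediately after it. A worked example of the arithmetic:

    /* 3GB VRAM: vram_end = 0xBFFFFFFF, old gart_start = 0xC0000000 */
    mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
    /* new gart_start = 0x100000000: no BO straddles a 4GB segment */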
-/*
- * Firmware Reservation functions
- */
/**
- * amdgpu_fw_reserve_vram_fini - free fw reserved vram
+ * amdgpu_device_resize_fb_bar - try to resize FB BAR
*
* @adev: amdgpu_device pointer
*
- * free fw reserved vram if it has been reserved.
+ * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
+ * to fail, but if any of the BARs is not accessible after the resize we abort
+ * driver loading by returning -ENODEV.
*/
-void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
+int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
- amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
- NULL, &adev->fw_vram_usage.va);
-}
+ u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
+ u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+ struct pci_bus *root;
+ struct resource *res;
+ unsigned i;
+ u16 cmd;
+ int r;
-/**
- * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
- *
- * @adev: amdgpu_device pointer
- *
- * create bo vram reservation from fw.
- */
-int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
-{
- int r = 0;
- u64 gpu_addr;
- u64 vram_size = adev->mc.visible_vram_size;
+ /* Bypass for VF */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
+ /* Check if the root bus has 64-bit memory resources */
+ root = adev->pdev->bus;
+ while (root->parent)
+ root = root->parent;
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
+ pci_bus_for_each_resource(root, res, i) {
+ if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
+ res->start > 0x100000000ull)
+ break;
+ }
- r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
- PAGE_SIZE, true, 0,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
+ /* Trying to resize is pointless without a root hub window above 4GB */
+ if (!res)
+ return 0;
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size), &gpu_addr);
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
+ /* Disable memory decoding while we change the BAR addresses and size */
+ pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
+ pci_write_config_word(adev->pdev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
- }
- return r;
+ /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+ amdgpu_device_doorbell_fini(adev);
+ if (adev->asic_type >= CHIP_BONAIRE)
+ pci_release_resource(adev->pdev, 2);
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
-}
+ pci_release_resource(adev->pdev, 0);
+
+ r = pci_resize_resource(adev->pdev, 0, rbar_size);
+ if (r == -ENOSPC)
+ DRM_INFO("Not enough PCI address space for a large BAR.");
+ else if (r && r != -ENOTSUPP)
+ DRM_ERROR("Problem resizing BAR0 (%d).", r);
+
+ pci_assign_unassigned_bus_resources(adev->pdev->bus);
+
+ /* When the doorbell or fb BAR isn't available we have no chance of
+ * using the device.
+ */
+ r = amdgpu_device_doorbell_init(adev);
+ if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
+ return -ENODEV;
+
+ pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
+ return 0;
+}
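The rbar_size arithmetic encodes the BAR size as log2 of the size in MB (0 = 1MB in the resizable-BAR capability); the "| 1" forces order_base_2() to round exact powers of two upward before the trailing -1 corrects the result. A worked example:

    /* 8GB of VRAM: space_needed = 1ULL << 33             */
    /* (space_needed >> 20) | 1  = 8193                   */
    /* order_base_2(8193)        = 14                     */
    /* rbar_size = 14 - 1        = 13 -> 2^13 MB = 8GB    */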
/*
* GPU helpers function.
*/
/**
- * amdgpu_need_post - check if the hw need post or not
+ * amdgpu_device_need_post - check if the hw needs post or not
*
* @adev: amdgpu_device pointer
*
@@ -741,7 +679,7 @@ error_create:
* or post is needed if hw reset is performed.
* Returns true if post is needed or false if not.
*/
-bool amdgpu_need_post(struct amdgpu_device *adev)
+bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -786,285 +724,9 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
return true;
}
-/**
- * amdgpu_dummy_page_init - init dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate the dummy page used by the driver (all asics).
- * This dummy page is used by the driver as a filler for gart entries
- * when pages are taken out of the GART
- * Returns 0 on sucess, -ENOMEM on failure.
- */
-int amdgpu_dummy_page_init(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page)
- return 0;
- adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
- if (adev->dummy_page.page == NULL)
- return -ENOMEM;
- adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
- dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-/**
- * amdgpu_dummy_page_fini - free dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the dummy page used by the driver (all asics).
- */
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page == NULL)
- return;
- pci_unmap_page(adev->pdev, adev->dummy_page.addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
-}
-
-
-/* ATOM accessor methods */
-/*
- * ATOM is an interpreted byte code stored in tables in the vbios. The
- * driver registers callbacks to access registers and the interpreter
- * in the driver parses the tables and executes then to program specific
- * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
- * atombios.h, and atom.c
- */
-
-/**
- * cail_pll_read - read PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- * Returns the value of the PLL register.
- */
-static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
-
-/**
- * cail_pll_write - write PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- * @val: value to write to the pll register
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- */
-static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
-
-}
-
-/**
- * cail_mc_read - read MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- *
- * Provides an MC register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MC register.
- */
-static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
-
-/**
- * cail_mc_write - write MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- * @val: value to write to the pll register
- *
- * Provides a MC register accessor for the atom interpreter (r4xx+).
- */
-static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
-
-}
-
-/**
- * cail_reg_write - write MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- * @val: value to write to the pll register
- *
- * Provides a MMIO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
-
- WREG32(reg, val);
-}
-
-/**
- * cail_reg_read - read MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- *
- * Provides an MMIO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MMIO register.
- */
-static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32(reg);
- return r;
-}
-
-/**
- * cail_ioreg_write - write IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- * @val: value to write to the pll register
- *
- * Provides a IO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
-
- WREG32_IO(reg, val);
-}
-
-/**
- * cail_ioreg_read - read IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- *
- * Provides an IO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the IO register.
- */
-static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32_IO(reg);
- return r;
-}
-
-static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct atom_context *ctx = adev->mode_info.atom_context;
-
- return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
-}
-
-static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
- NULL);
-
-/**
- * amdgpu_atombios_fini - free the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the driver info and register access callbacks for the ATOM
- * interpreter (r4xx+).
- * Called at driver shutdown.
- */
-static void amdgpu_atombios_fini(struct amdgpu_device *adev)
-{
- if (adev->mode_info.atom_context) {
- kfree(adev->mode_info.atom_context->scratch);
- kfree(adev->mode_info.atom_context->iio);
- }
- kfree(adev->mode_info.atom_context);
- adev->mode_info.atom_context = NULL;
- kfree(adev->mode_info.atom_card_info);
- adev->mode_info.atom_card_info = NULL;
- device_remove_file(adev->dev, &dev_attr_vbios_version);
-}
-
-/**
- * amdgpu_atombios_init - init the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Initializes the driver info and register access callbacks for the
- * ATOM interpreter (r4xx+).
- * Returns 0 on sucess, -ENOMEM on failure.
- * Called at driver startup.
- */
-static int amdgpu_atombios_init(struct amdgpu_device *adev)
-{
- struct card_info *atom_card_info =
- kzalloc(sizeof(struct card_info), GFP_KERNEL);
- int ret;
-
- if (!atom_card_info)
- return -ENOMEM;
-
- adev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = adev->ddev;
- atom_card_info->reg_read = cail_reg_read;
- atom_card_info->reg_write = cail_reg_write;
- /* needed for iio ops */
- if (adev->rio_mem) {
- atom_card_info->ioreg_read = cail_ioreg_read;
- atom_card_info->ioreg_write = cail_ioreg_write;
- } else {
- DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
- atom_card_info->ioreg_read = cail_reg_read;
- atom_card_info->ioreg_write = cail_reg_write;
- }
- atom_card_info->mc_read = cail_mc_read;
- atom_card_info->mc_write = cail_mc_write;
- atom_card_info->pll_read = cail_pll_read;
- atom_card_info->pll_write = cail_pll_write;
-
- adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
- if (!adev->mode_info.atom_context) {
- amdgpu_atombios_fini(adev);
- return -ENOMEM;
- }
-
- mutex_init(&adev->mode_info.atom_context->mutex);
- if (adev->is_atom_fw) {
- amdgpu_atomfirmware_scratch_regs_init(adev);
- amdgpu_atomfirmware_allocate_fb_scratch(adev);
- } else {
- amdgpu_atombios_scratch_regs_init(adev);
- amdgpu_atombios_allocate_fb_scratch(adev);
- }
-
- ret = device_create_file(adev->dev, &dev_attr_vbios_version);
- if (ret) {
- DRM_ERROR("Failed to create device file for VBIOS version\n");
- return ret;
- }
-
- return 0;
-}
-
/* if we get transitioned to only one device, take VGA back */
/**
- * amdgpu_vga_set_decode - enable/disable vga decode
+ * amdgpu_device_vga_set_decode - enable/disable vga decode
*
* @cookie: amdgpu_device pointer
* @state: enable/disable vga decode
@@ -1072,7 +734,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
-static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
+static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
struct amdgpu_device *adev = cookie;
amdgpu_asic_set_vga_state(adev, state);
@@ -1083,7 +745,7 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
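
Reviewer note on the callback renamed above: the decode hook tells the VGA arbiter which resources the adapter still decodes. A small runnable sketch of that pattern in plain C; the RSRC_* values here are made up for illustration (the real VGA_RSRC_* constants live in <linux/vgaarb.h>), and the enabled branch is not visible in this hunk, so it is an assumption:

#include <stdbool.h>
#include <stdio.h>

#define RSRC_NORMAL_IO  0x01    /* illustrative flag values only */
#define RSRC_NORMAL_MEM 0x02
#define RSRC_LEGACY_IO  0x04
#define RSRC_LEGACY_MEM 0x08

/* while VGA decode is enabled the driver presumably claims the legacy
 * ranges too; once disabled, only the normal BARs remain decoded */
static unsigned int vga_set_decode(bool state)
{
    if (state)
        return RSRC_NORMAL_IO | RSRC_NORMAL_MEM |
               RSRC_LEGACY_IO | RSRC_LEGACY_MEM;
    return RSRC_NORMAL_IO | RSRC_NORMAL_MEM;
}

int main(void)
{
    printf("enabled: %#x, disabled: %#x\n",
           vga_set_decode(true), vga_set_decode(false));
    return 0;
}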
-static void amdgpu_check_block_size(struct amdgpu_device *adev)
+static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
@@ -1094,64 +756,32 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev)
if (amdgpu_vm_block_size < 9) {
dev_warn(adev->dev, "VM page table size (%d) too small\n",
amdgpu_vm_block_size);
- goto def_value;
+ amdgpu_vm_block_size = -1;
}
-
- if (amdgpu_vm_block_size > 24 ||
- (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
- dev_warn(adev->dev, "VM page table size (%d) too large\n",
- amdgpu_vm_block_size);
- goto def_value;
- }
-
- return;
-
-def_value:
- amdgpu_vm_block_size = -1;
}
-static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
/* no need to check the default value */
if (amdgpu_vm_size == -1)
return;
- if (!is_power_of_2(amdgpu_vm_size)) {
- dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
- amdgpu_vm_size);
- goto def_value;
- }
-
if (amdgpu_vm_size < 1) {
dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
amdgpu_vm_size);
- goto def_value;
- }
-
- /*
- * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
- */
- if (amdgpu_vm_size > 1024) {
- dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
- amdgpu_vm_size);
- goto def_value;
+ amdgpu_vm_size = -1;
}
-
- return;
-
-def_value:
- amdgpu_vm_size = -1;
}
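
The two hunks above replace the goto-based bail-outs with a direct `param = -1` fallback; the dropped upper-bound checks (block size > 24, power-of-2 VM size, 1TB cap) appear to move into the VM code elsewhere in this series rather than disappear. A standalone sketch of the resulting shape, using a stand-in module parameter:

#include <stdio.h>

static int vm_block_size = 7;   /* stand-in module parameter, -1 = auto */

static void check_block_size(void)
{
    /* pages are 4KB (12 offset bits) and a page table wants at least
     * 9 bits (512 entries), so anything below 9 falls back to auto */
    if (vm_block_size < 9) {
        fprintf(stderr, "VM page table size (%d) too small\n",
                vm_block_size);
        vm_block_size = -1;
    }
}

int main(void)
{
    check_block_size();
    printf("block size after check: %d\n", vm_block_size);  /* -1 */
    return 0;
}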
/**
- * amdgpu_check_arguments - validate module params
+ * amdgpu_device_check_arguments - validate module params
*
* @adev: amdgpu_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
-static void amdgpu_check_arguments(struct amdgpu_device *adev)
+static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
@@ -1184,9 +814,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_fragment_size = -1;
}
- amdgpu_check_vm_size(adev);
+ amdgpu_device_check_vm_size(adev);
- amdgpu_check_block_size(adev);
+ amdgpu_device_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!is_power_of_2(amdgpu_vram_page_split))) {
@@ -1194,6 +824,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
}
+
+ if (amdgpu_lockup_timeout == 0) {
+ dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
+ amdgpu_lockup_timeout = 10000;
+ }
}
/**
@@ -1257,9 +892,9 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
.can_switch = amdgpu_switcheroo_can_switch,
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state)
+int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state)
{
int i, r = 0;
@@ -1279,9 +914,9 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
return r;
}
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state)
+int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state)
{
int i, r = 0;
@@ -1301,7 +936,8 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
return r;
}
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
{
int i;
@@ -1313,8 +949,8 @@ void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
}
}
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i, r;
@@ -1332,8 +968,8 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
}
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i;
@@ -1347,8 +983,9 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
}
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
@@ -1360,7 +997,7 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
}
/**
- * amdgpu_ip_block_version_cmp
+ * amdgpu_device_ip_block_version_cmp
*
* @adev: amdgpu_device pointer
* @type: enum amd_ip_block_type
@@ -1370,11 +1007,11 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
* return 0 if equal or greater
* return 1 if smaller or the ip_block doesn't exist
*/
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor)
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor)
{
- struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
if (ip_block && ((ip_block->version->major > major) ||
((ip_block->version->major == major) &&
@@ -1385,7 +1022,7 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
}
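
The comparison itself is unchanged by the rename. As a plain-C reduction (field names mirror struct amdgpu_ip_block_version; the wrapper type is invented for the sketch):

struct ip_version { unsigned int major, minor; };

/* 0 if the block exists and is at least major.minor, 1 otherwise,
 * matching the return convention documented above */
static int ip_version_cmp(const struct ip_version *v,
                          unsigned int major, unsigned int minor)
{
    if (v && ((v->major > major) ||
              ((v->major == major) && (v->minor >= minor))))
        return 0;
    return 1;
}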
/**
- * amdgpu_ip_block_add
+ * amdgpu_device_ip_block_add
*
* @adev: amdgpu_device pointer
* @ip_block_version: pointer to the IP to add
@@ -1393,8 +1030,8 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
* Adds the IP block driver information to the collection of IPs
* on the asic.
*/
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version)
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
{
if (!ip_block_version)
return -EINVAL;
@@ -1550,7 +1187,7 @@ out:
return err;
}
-static int amdgpu_early_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
int i, r;
@@ -1622,10 +1259,12 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ amdgpu_amdkfd_device_probe(adev);
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
- return r;
+ return -EAGAIN;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1657,7 +1296,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
int i, r;
@@ -1673,7 +1312,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- r = amdgpu_vram_scratch_init(adev);
+ r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
@@ -1683,9 +1322,9 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
}
- r = amdgpu_wb_init(adev);
+ r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_wb_init failed %d\n", r);
+ DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -1716,21 +1355,26 @@ static int amdgpu_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = true;
}
+ amdgpu_amdkfd_device_init(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, true);
+
return 0;
}
-static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
+static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}
-static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
+static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
return !!memcmp(adev->gart.ptr, adev->reset_magic,
AMDGPU_RESET_MAGIC_NUM);
}
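
These two helpers are a simple VRAM-loss detector: a magic snapshot is taken from GART-visible memory (adev->gart.ptr) before a reset and compared afterwards. The same idea as freestanding C; RESET_MAGIC_NUM's real value is defined elsewhere in the driver, 64 here is an assumption:

#include <stdbool.h>
#include <string.h>

#define RESET_MAGIC_NUM 64      /* assumed size for this sketch */

/* snapshot taken before the reset ... */
static void fill_reset_magic(unsigned char *magic, const void *gart_ptr)
{
    memcpy(magic, gart_ptr, RESET_MAGIC_NUM);
}

/* ... and compared afterwards: any mismatch means VRAM contents did
 * not survive, so buffers must be restored from their shadow copies */
static bool check_vram_lost(const unsigned char *magic, const void *gart_ptr)
{
    return memcmp(gart_ptr, magic, RESET_MAGIC_NUM) != 0;
}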
-static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
+static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
int i = 0, r;
@@ -1753,7 +1397,7 @@ static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_late_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
int i = 0, r;
@@ -1774,15 +1418,16 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
mod_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- amdgpu_fill_reset_magic(adev);
+ amdgpu_device_fill_reset_magic(adev);
return 0;
}
-static int amdgpu_fini(struct amdgpu_device *adev)
+static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_amdkfd_device_fini(adev);
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -1811,8 +1456,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- amdgpu_wb_fini(adev);
- amdgpu_vram_scratch_fini(adev);
+ amdgpu_free_static_csa(adev);
+ amdgpu_device_wb_fini(adev);
+ amdgpu_device_vram_scratch_fini(adev);
}
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1859,19 +1505,20 @@ static int amdgpu_fini(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, false);
+ if (amdgpu_virt_release_full_gpu(adev, false))
+ DRM_ERROR("failed to release exclusive mode on fini\n");
return 0;
}
-static void amdgpu_late_init_func_handler(struct work_struct *work)
+static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, late_init_work.work);
- amdgpu_late_set_cg_state(adev);
+ amdgpu_device_ip_late_set_cg_state(adev);
}
-int amdgpu_suspend(struct amdgpu_device *adev)
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int i, r;
@@ -1879,10 +1526,10 @@ int amdgpu_suspend(struct amdgpu_device *adev)
amdgpu_virt_request_full_gpu(adev, false);
/* ungate SMC block first */
- r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
+ r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
+ DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
@@ -1912,7 +1559,7 @@ int amdgpu_suspend(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1941,7 +1588,7 @@ static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1974,7 +1621,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume_phase1(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
int i, r;
@@ -1997,7 +1644,7 @@ static int amdgpu_resume_phase1(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume_phase2(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
int i, r;
@@ -2019,14 +1666,14 @@ static int amdgpu_resume_phase2(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_resume_phase1(adev);
+ r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
- r = amdgpu_resume_phase2(adev);
+ r = amdgpu_device_ip_resume_phase2(adev);
return r;
}
@@ -2163,8 +1810,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
+ mutex_init(&adev->lock_reset);
- amdgpu_check_arguments(adev);
+ amdgpu_device_check_arguments(adev);
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->smc_idx_lock);
@@ -2179,13 +1827,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
- INIT_LIST_HEAD(&adev->gtt_list);
- spin_lock_init(&adev->gtt_list_lock);
-
INIT_LIST_HEAD(&adev->ring_lru_list);
spin_lock_init(&adev->ring_lru_list_lock);
- INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
+ INIT_DELAYED_WORK(&adev->late_init_work,
+ amdgpu_device_ip_late_init_func_handler);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -2205,7 +1851,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
/* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ amdgpu_device_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2219,17 +1865,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("PCI I/O BAR is not found.\n");
/* early init functions */
- r = amdgpu_early_init(adev);
+ r = amdgpu_device_ip_early_init(adev);
if (r)
return r;
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
- vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
+ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
- if (amdgpu_runtime_pm == 1)
- runtime = true;
if (amdgpu_device_is_px(ddev))
runtime = true;
if (!pci_is_thunderbolt_attached(adev->pdev))
@@ -2255,7 +1899,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (amdgpu_need_post(adev)) {
+ if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
r = -EINVAL;
@@ -2267,8 +1911,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
dev_err(adev->dev, "gpu post error!\n");
goto failed;
}
- } else {
- DRM_INFO("GPU post is not needed\n");
}
if (adev->is_atom_fw) {
@@ -2303,11 +1945,23 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* init the mode config */
drm_mode_config_init(adev->ddev);
- r = amdgpu_init(adev);
+ r = amdgpu_device_ip_init(adev);
if (r) {
- dev_err(adev->dev, "amdgpu_init failed\n");
+ /* failed in exclusive mode due to timeout */
+ if (amdgpu_sriov_vf(adev) &&
+ !amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_mmio_blocked(adev) &&
+ !amdgpu_virt_wait_reset(adev)) {
+ dev_err(adev->dev, "VF exclusive mode timeout\n");
+ /* Don't send request since VF is inactive. */
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ adev->virt.ops = NULL;
+ r = -EAGAIN;
+ goto failed;
+ }
+ dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
- amdgpu_fini(adev);
+ amdgpu_device_ip_fini(adev);
goto failed;
}
@@ -2343,7 +1997,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
- r = amdgpu_gem_debugfs_init(adev);
+ r = amdgpu_debugfs_gem_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2351,17 +2005,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_test_ib_ring_init(adev);
- if (r)
- DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
-
r = amdgpu_debugfs_firmware_init(adev);
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_vbios_dump_init(adev);
+ r = amdgpu_debugfs_init(adev);
if (r)
- DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
+ DRM_ERROR("Creating debugfs files failed (%d).\n", r);
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
@@ -2379,9 +2029,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- r = amdgpu_late_init(adev);
+ r = amdgpu_device_ip_late_init(adev);
if (r) {
- dev_err(adev->dev, "amdgpu_late_init failed\n");
+ dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
@@ -2392,6 +2042,7 @@ failed:
amdgpu_vf_error_trans_all(adev);
if (runtime)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
+
return r;
}
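
Note the new -EAGAIN path above: when an SR-IOV VF times out waiting for exclusive mode, init now fails with -EAGAIN, and the PCI probe code (see the amdgpu_drv.c hunk near the end of this diff, with its retry_init label) retries registration a few times. A runnable sketch of that loop, with a hypothetical try_init() standing in for drm_dev_register():

#include <errno.h>
#include <stdio.h>

/* hypothetical: fails twice with -EAGAIN, then succeeds */
static int try_init(void)
{
    static int attempts;
    return attempts++ < 2 ? -EAGAIN : 0;
}

int main(void)
{
    int ret, retry = 0;

retry_init:
    ret = try_init();
    if (ret == -EAGAIN && ++retry <= 3) {
        fprintf(stderr, "retry init %d\n", retry);
        goto retry_init;
    }
    return ret ? 1 : 0;
}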
@@ -2411,13 +2062,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->shutdown = true;
if (adev->mode_info.mode_config_initialized)
drm_crtc_force_disable_all(adev->ddev);
- /* evict vram memory */
- amdgpu_bo_evict_vram(adev);
+
amdgpu_ib_pool_fini(adev);
- amdgpu_fw_reserve_vram_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
- r = amdgpu_fini(adev);
+ r = amdgpu_device_ip_fini(adev);
if (adev->firmware.gpu_info_fw) {
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
@@ -2440,7 +2089,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
+ amdgpu_device_doorbell_fini(adev);
amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2521,7 +2170,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@@ -2529,7 +2178,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2578,18 +2226,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
goto unlock;
}
- amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
- if (amdgpu_need_post(adev)) {
+ if (amdgpu_device_need_post(adev)) {
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r)
DRM_ERROR("amdgpu asic init failed\n");
}
- r = amdgpu_resume(adev);
+ r = amdgpu_device_ip_resume(adev);
if (r) {
- DRM_ERROR("amdgpu_resume failed (%d).\n", r);
+ DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
goto unlock;
}
amdgpu_fence_driver_resume(adev);
@@ -2600,7 +2247,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
DRM_ERROR("ib ring test failed (%d).\n", r);
}
- r = amdgpu_late_init(adev);
+ r = amdgpu_device_ip_late_init(adev);
if (r)
goto unlock;
@@ -2680,7 +2327,7 @@ unlock:
return r;
}
-static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
int i;
bool asic_hang = false;
@@ -2702,7 +2349,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
return asic_hang;
}
-static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2720,7 +2367,7 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
int i;
@@ -2741,7 +2388,7 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
return false;
}
-static int amdgpu_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2759,7 +2406,7 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2776,18 +2423,10 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
return 0;
}
-bool amdgpu_need_backup(struct amdgpu_device *adev)
-{
- if (adev->flags & AMD_IS_APU)
- return false;
-
- return amdgpu_lockup_timeout > 0 ? true : false;
-}
-
-static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2819,163 +2458,181 @@ err:
return r;
}
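
The shadow-recovery helper renamed above is consumed by a loop (visible further down in amdgpu_device_gpu_recover) that chains fences so the wait for copy i overlaps the submission of copy i+1. The pattern in miniature, with toy stand-ins for dma_fence so the sketch compiles on its own:

#include <stddef.h>

struct fence { int signaled; };                 /* toy dma_fence */

static int fence_wait(struct fence *f) { (void)f; return 0; }
static void fence_put(struct fence *f) { (void)f; }
static struct fence dummy;
static struct fence *copy_from_shadow(void *bo) { (void)bo; return &dummy; }

/* kick off copy i+1 before waiting on copy i, keeping the restore
 * from shadow BOs pipelined instead of fully serialized */
static int recover_all(void **bos, int n)
{
    struct fence *fence = NULL, *next = NULL;
    int i, r = 0;

    for (i = 0; i < n; i++) {
        next = copy_from_shadow(bos[i]);        /* start next copy */
        if (fence) {
            r = fence_wait(fence);              /* overlaps the copy */
            if (r)
                break;
        }
        fence_put(fence);
        fence = next;
    }
    if (!r && fence)
        r = fence_wait(fence);                  /* drain the last one */
    fence_put(fence);
    return r;
}

int main(void)
{
    void *bos[3] = { 0 };
    return recover_all(bos, 3);
}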
-/**
- * amdgpu_sriov_gpu_reset - reset the asic
+/*
+ * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
*
* @adev: amdgpu device pointer
- * @job: which job trigger hang
+ * @reset_flags: output parameter telling the caller the reset result
*
- * Attempt the reset the GPU if it has hung (all asics).
- * for SRIOV case.
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
+ * Attempts a soft reset, falling back to a full reset, and reinitializes the ASIC.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_reset(struct amdgpu_device *adev,
+ uint64_t* reset_flags)
{
- int i, j, r = 0;
- int resched;
- struct amdgpu_bo *bo, *tmp;
- struct amdgpu_ring *ring;
- struct dma_fence *fence = NULL, *next = NULL;
+ bool need_full_reset, vram_lost = 0;
+ int r;
- mutex_lock(&adev->virt.lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->in_sriov_reset = true;
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ if (!need_full_reset) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
- /* we start from the ring trigger GPU hang */
- j = job ? job->ring->idx : 0;
+ }
- /* block scheduler */
- for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
- ring = adev->rings[i % AMDGPU_MAX_RINGS];
- if (!ring || !ring->sched.thread)
- continue;
+ if (need_full_reset) {
+ r = amdgpu_device_ip_suspend(adev);
- kthread_park(ring->sched.thread);
+retry:
+ r = amdgpu_asic_reset(adev);
+ /* post card */
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
- if (job && j != i)
- continue;
+ if (!r) {
+ dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_device_ip_resume_phase1(adev);
+ if (r)
+ goto out;
- /* here give the last chance to check if job removed from mirror-list
- * since we already pay some time on kthread_park */
- if (job && list_empty(&job->base.node)) {
- kthread_unpark(ring->sched.thread);
- goto give_up_reset;
+ vram_lost = amdgpu_device_check_vram_lost(adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&adev->vram_lost_counter);
+ }
+
+ r = amdgpu_gtt_mgr_recover(
+ &adev->mman.bdev.man[TTM_PL_TT]);
+ if (r)
+ goto out;
+
+ r = amdgpu_device_ip_resume_phase2(adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+ amdgpu_device_fill_reset_magic(adev);
}
+ }
- if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
- amd_sched_job_kickout(&job->base);
+out:
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
+ if (r) {
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_device_ip_suspend(adev);
+ need_full_reset = true;
+ goto retry;
+ }
+ }
- /* only do job_reset on the hang ring if @job not NULL */
- amd_sched_hw_job_reset(&ring->sched);
+ if (reset_flags) {
+ if (vram_lost)
+ (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
- /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
- amdgpu_fence_driver_force_completion_ring(ring);
+ if (need_full_reset)
+ (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
}
- /* request to take full control of GPU before re-initialization */
- if (job)
- amdgpu_virt_reset_gpu(adev);
- else
- amdgpu_virt_request_full_gpu(adev, true);
+ return r;
+}
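
The control flow factored out here: try a soft reset first, escalate to a full reset when the soft reset fails or the post-reset IB ring tests do. A compile-and-run reduction with stubbed, hypothetical hooks standing in for the IP-block walks:

#include <stdbool.h>
#include <stdio.h>

static bool soft_reset_ok(void) { return false; }   /* pretend it failed */
static int full_reset(void) { return 0; }
static int ib_tests_failed(void) { static int fails = 1; return fails-- > 0; }

static int device_reset(void)
{
    bool need_full_reset = !soft_reset_ok();
    int r = 0;

retry:
    if (need_full_reset)
        r = full_reset();

    if (!r && ib_tests_failed()) {
        /* ring tests failed: escalate and try once more */
        need_full_reset = true;
        goto retry;
    }
    return r;
}

int main(void)
{
    printf("reset result: %d\n", device_reset());
    return 0;
}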
+/*
+ * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
+ *
+ * @adev: amdgpu device pointer
+ * @reset_flags: output parameter telling the caller the reset result
+ *
+ * Performs a VF FLR and reinitializes the ASIC.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ uint64_t *reset_flags,
+ bool from_hypervisor)
+{
+ int r;
+
+ if (from_hypervisor)
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ else
+ r = amdgpu_virt_reset_gpu(adev);
+ if (r)
+ return r;
/* Resume IP prior to SMC */
- amdgpu_sriov_reinit_early(adev);
+ r = amdgpu_device_ip_reinit_early_sriov(adev);
+ if (r)
+ goto error;
/* we need recover gart prior to run SMC/CP/SDMA resume */
- amdgpu_ttm_recover_gart(adev);
+ amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
/* now we are okay to resume SMC/CP/SDMA */
- amdgpu_sriov_reinit_late(adev);
+ r = amdgpu_device_ip_reinit_late_sriov(adev);
+ if (r)
+ goto error;
amdgpu_irq_gpu_reset_resume_helper(adev);
-
- if (amdgpu_ib_ring_tests(adev))
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
+error:
/* release full control of GPU after ib test */
amdgpu_virt_release_full_gpu(adev, true);
- DRM_INFO("recover vram bo from shadow\n");
-
- ring = adev->mman.buffer_funcs_ring;
- mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
- next = NULL;
- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
- if (fence) {
- r = dma_fence_wait(fence, false);
- if (r) {
- WARN(r, "recovery from shadow isn't completed\n");
- break;
- }
- }
-
- dma_fence_put(fence);
- fence = next;
- }
- mutex_unlock(&adev->shadow_list_lock);
-
- if (fence) {
- r = dma_fence_wait(fence, false);
- if (r)
- WARN(r, "recovery from shadow isn't completed\n");
- }
- dma_fence_put(fence);
-
- for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
- ring = adev->rings[i % AMDGPU_MAX_RINGS];
- if (!ring || !ring->sched.thread)
- continue;
-
- if (job && j != i) {
- kthread_unpark(ring->sched.thread);
- continue;
+ if (reset_flags) {
+ if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+ atomic_inc(&adev->vram_lost_counter);
}
- amd_sched_job_recovery(&ring->sched);
- kthread_unpark(ring->sched.thread);
+ /* VF FLR or hotlink reset is always full-reset */
+ (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
}
- drm_helper_resume_force_mode(adev->ddev);
-give_up_reset:
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset failed\n");
- } else {
- dev_info(adev->dev, "GPU reset successed!\n");
- }
-
- adev->in_sriov_reset = false;
- mutex_unlock(&adev->virt.lock_reset);
return r;
}
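
Both reset paths report what happened through the optional reset_flags out-parameter; the caller then decides whether shadow-BO recovery is needed. A sketch of that reporting (the real AMDGPU_RESET_INFO_* bits are defined elsewhere in this series; the values below are assumptions):

#include <stdint.h>

#define RESET_INFO_VRAM_LOST (1ULL << 0)    /* assumed bit assignments */
#define RESET_INFO_FULLRESET (1ULL << 1)

/* OR in the outcome; a NULL out-param means the caller doesn't care */
static void report_reset(uint64_t *reset_flags, int vram_lost, int full_reset)
{
    if (!reset_flags)
        return;
    if (vram_lost)
        *reset_flags |= RESET_INFO_VRAM_LOST;
    if (full_reset)
        *reset_flags |= RESET_INFO_FULLRESET;
}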
/**
- * amdgpu_gpu_reset - reset the asic
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
* @adev: amdgpu device pointer
+ * @job: which job triggered the hang
+ * @force: forces reset regardless of amdgpu_gpu_recovery
*
- * Attempt the reset the GPU if it has hung (all asics).
+ * Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
*/
-int amdgpu_gpu_reset(struct amdgpu_device *adev)
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job, bool force)
{
struct drm_atomic_state *state = NULL;
- int i, r;
- int resched;
- bool need_full_reset, vram_lost = false;
+ uint64_t reset_flags = 0;
+ int i, r, resched;
- if (!amdgpu_check_soft_reset(adev)) {
+ if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
return 0;
}
+ if (!force && (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
+ DRM_INFO("GPU recovery disabled.\n");
+ return 0;
+ }
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ mutex_lock(&adev->lock_reset);
atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -2989,69 +2646,26 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
if (!ring || !ring->sched.thread)
continue;
- kthread_park(ring->sched.thread);
- amd_sched_hw_job_reset(&ring->sched);
- }
- /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
- amdgpu_fence_driver_force_completion(adev);
- need_full_reset = amdgpu_need_full_reset(adev);
+ /* only focus on the ring that hit the timeout if @job is not NULL */
+ if (job && job->ring->idx != i)
+ continue;
- if (!need_full_reset) {
- amdgpu_pre_soft_reset(adev);
- r = amdgpu_soft_reset(adev);
- amdgpu_post_soft_reset(adev);
- if (r || amdgpu_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
- need_full_reset = true;
- }
- }
+ kthread_park(ring->sched.thread);
+ drm_sched_hw_job_reset(&ring->sched, &job->base);
- if (need_full_reset) {
- r = amdgpu_suspend(adev);
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(ring);
+ }
-retry:
- amdgpu_atombios_scratch_regs_save(adev);
- r = amdgpu_asic_reset(adev);
- amdgpu_atombios_scratch_regs_restore(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (amdgpu_sriov_vf(adev))
+ r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
+ else
+ r = amdgpu_device_reset(adev, &reset_flags);
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_resume_phase1(adev);
- if (r)
- goto out;
- vram_lost = amdgpu_check_vram_lost(adev);
- if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
- atomic_inc(&adev->vram_lost_counter);
- }
- r = amdgpu_ttm_recover_gart(adev);
- if (r)
- goto out;
- r = amdgpu_resume_phase2(adev);
- if (r)
- goto out;
- if (vram_lost)
- amdgpu_fill_reset_magic(adev);
- }
- }
-out:
if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_suspend(adev);
- need_full_reset = true;
- goto retry;
- }
- /**
- * recovery vm page tables, since we cannot depend on VRAM is
- * consistent after gpu full reset.
- */
- if (need_full_reset && amdgpu_need_backup(adev)) {
+ if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
+ (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
struct dma_fence *fence = NULL, *next = NULL;
@@ -3060,7 +2674,7 @@ out:
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
next = NULL;
- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+ amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = dma_fence_wait(fence, false);
if (r) {
@@ -3080,44 +2694,60 @@ out:
}
dma_fence_put(fence);
}
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->sched.thread)
continue;
- amd_sched_job_recovery(&ring->sched);
+ /* only focus on the ring that hit the timeout if @job is not NULL */
+ if (job && job->ring->idx != i)
+ continue;
+
+ drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
}
} else {
- dev_err(adev->dev, "asic resume failed (%d).\n", r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i] && adev->rings[i]->sched.thread) {
- kthread_unpark(adev->rings[i]->sched.thread);
- }
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ /* only focus on the ring that hit the timeout if @job is not NULL */
+ if (job && job->ring->idx != i)
+ continue;
+
+ kthread_unpark(adev->rings[i]->sched.thread);
}
}
if (amdgpu_device_has_dc_support(adev)) {
- r = drm_atomic_helper_resume(adev->ddev, state);
+ if (drm_atomic_helper_resume(adev->ddev, state))
+ dev_info(adev->dev, "drm resume failed:%d\n", r);
amdgpu_dm_display_resume(adev);
- } else
+ } else {
drm_helper_resume_force_mode(adev->ddev);
+ }
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+
if (r) {
/* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset failed\n");
- }
- else {
- dev_info(adev->dev, "GPU reset successed!\n");
+ dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
}
amdgpu_vf_error_trans_all(adev);
+ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
return r;
}
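
The new entry point is gated by the gpu_recovery module parameter introduced in the amdgpu_drv.c hunk below: -1 means only SR-IOV VFs recover automatically. The check at the top of amdgpu_device_gpu_recover(), expressed as a standalone predicate:

#include <stdbool.h>

static int gpu_recovery = -1;   /* 1 = always, 0 = never, -1 = auto */

/* auto: recover SR-IOV VFs by default, leave bare-metal to the user
 * unless the caller forces it */
static bool should_recover(bool force, bool is_sriov_vf)
{
    if (force)
        return true;
    if (gpu_recovery == 0)
        return false;
    if (gpu_recovery == -1 && !is_sriov_vf)
        return false;
    return true;
}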
-void amdgpu_get_pcie_info(struct amdgpu_device *adev)
+void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
int ret;
@@ -3209,773 +2839,3 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
}
}
-/*
- * Debugfs
- */
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles)
-{
- unsigned i;
-
- for (i = 0; i < adev->debugfs_count; i++) {
- if (adev->debugfs[i].files == files) {
- /* Already registered */
- return 0;
- }
- }
-
- i = adev->debugfs_count + 1;
- if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
- DRM_ERROR("Reached maximum number of debugfs components.\n");
- DRM_ERROR("Report so we increase "
- "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
- }
- adev->debugfs[adev->debugfs_count].files = files;
- adev->debugfs[adev->debugfs_count].num_files = nfiles;
- adev->debugfs_count = i;
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_create_files(files, nfiles,
- adev->ddev->primary->debugfs_root,
- adev->ddev->primary);
-#endif
- return 0;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- goto end;
-
- value = RREG32(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto end;
- }
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
-end:
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- return result;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_PCIE(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_PCIE(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_DIDT(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_DIDT(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_SMC(*pos);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_SMC(*pos, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- uint32_t *config, no_regs = 0;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- /* version, increment each time something is added */
- config[no_regs++] = 3;
- config[no_regs++] = adev->gfx.config.max_shader_engines;
- config[no_regs++] = adev->gfx.config.max_tile_pipes;
- config[no_regs++] = adev->gfx.config.max_cu_per_sh;
- config[no_regs++] = adev->gfx.config.max_sh_per_se;
- config[no_regs++] = adev->gfx.config.max_backends_per_se;
- config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
- config[no_regs++] = adev->gfx.config.max_gprs;
- config[no_regs++] = adev->gfx.config.max_gs_threads;
- config[no_regs++] = adev->gfx.config.max_hw_contexts;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
- config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.num_tile_pipes;
- config[no_regs++] = adev->gfx.config.backend_enable_mask;
- config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
- config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
- config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
- config[no_regs++] = adev->gfx.config.num_gpus;
- config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
- config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
- config[no_regs++] = adev->gfx.config.gb_addr_config;
- config[no_regs++] = adev->gfx.config.num_rbs;
-
- /* rev==1 */
- config[no_regs++] = adev->rev_id;
- config[no_regs++] = adev->pg_flags;
- config[no_regs++] = adev->cg_flags;
-
- /* rev==2 */
- config[no_regs++] = adev->family;
- config[no_regs++] = adev->external_rev_id;
-
- /* rev==3 */
- config[no_regs++] = adev->pdev->device;
- config[no_regs++] = adev->pdev->revision;
- config[no_regs++] = adev->pdev->subsystem_device;
- config[no_regs++] = adev->pdev->subsystem_vendor;
-
- while (size && (*pos < no_regs * 4)) {
- uint32_t value;
-
- value = config[*pos >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- kfree(config);
- return r;
- }
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- kfree(config);
- return result;
-}
-
-static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- int idx, x, outsize, r, valuesize;
- uint32_t values[16];
-
- if (size & 3 || *pos & 0x3)
- return -EINVAL;
-
- if (amdgpu_dpm == 0)
- return -EINVAL;
-
- /* convert offset to sensor number */
- idx = *pos >> 2;
-
- valuesize = sizeof(values);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- else
- return -EINVAL;
-
- if (size > valuesize)
- return -EINVAL;
-
- outsize = 0;
- x = 0;
- if (!r) {
- while (size) {
- r = put_user(values[x++], (int32_t *)buf);
- buf += 4;
- size -= 4;
- outsize += 4;
- }
- }
-
- return !r ? outsize : r;
-}
-
-static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r, x;
- ssize_t result=0;
- uint32_t offset, se, sh, cu, wave, simd, data[32];
-
- if (size & 3 || *pos & 3)
- return -EINVAL;
-
- /* decode offset */
- offset = (*pos & GENMASK_ULL(6, 0));
- se = (*pos & GENMASK_ULL(14, 7)) >> 7;
- sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
- cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
- wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
- simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
-
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
-
- x = 0;
- if (adev->gfx.funcs->read_wave_data)
- adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- if (!x)
- return -EINVAL;
-
- while (size && (offset < x * 4)) {
- uint32_t value;
-
- value = data[offset >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- offset += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r;
- ssize_t result = 0;
- uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
-
- if (size & 3 || *pos & 3)
- return -EINVAL;
-
- /* decode offset */
- offset = *pos & GENMASK_ULL(11, 0);
- se = (*pos & GENMASK_ULL(19, 12)) >> 12;
- sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
- cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
- wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
- simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
- thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
- bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
-
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
-
- if (bank == 0) {
- if (adev->gfx.funcs->read_wave_vgprs)
- adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
- } else {
- if (adev->gfx.funcs->read_wave_sgprs)
- adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
- }
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- while (size) {
- uint32_t value;
-
- value = data[offset++];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto err;
- }
-
- result += 4;
- buf += 4;
- size -= 4;
- }
-
-err:
- kfree(data);
- return result;
-}
-
-static const struct file_operations amdgpu_debugfs_regs_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_read,
- .write = amdgpu_debugfs_regs_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_didt_read,
- .write = amdgpu_debugfs_regs_didt_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_pcie_read,
- .write = amdgpu_debugfs_regs_pcie_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_smc_read,
- .write = amdgpu_debugfs_regs_smc_write,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_gca_config_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gca_config_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_sensors_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_sensor_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_wave_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_wave_read,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_gpr_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gpr_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations *debugfs_regs[] = {
- &amdgpu_debugfs_regs_fops,
- &amdgpu_debugfs_regs_didt_fops,
- &amdgpu_debugfs_regs_pcie_fops,
- &amdgpu_debugfs_regs_smc_fops,
- &amdgpu_debugfs_gca_config_fops,
- &amdgpu_debugfs_sensors_fops,
- &amdgpu_debugfs_wave_fops,
- &amdgpu_debugfs_gpr_fops,
-};
-
-static const char *debugfs_regs_names[] = {
- "amdgpu_regs",
- "amdgpu_regs_didt",
- "amdgpu_regs_pcie",
- "amdgpu_regs_smc",
- "amdgpu_gca_config",
- "amdgpu_sensors",
- "amdgpu_wave",
- "amdgpu_gpr",
-};
-
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- struct drm_minor *minor = adev->ddev->primary;
- struct dentry *ent, *root = minor->debugfs_root;
- unsigned i, j;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | S_IRUGO, root,
- adev, debugfs_regs[i]);
- if (IS_ERR(ent)) {
- for (j = 0; j < i; j++) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- return PTR_ERR(ent);
- }
-
- if (!i)
- i_size_write(ent->d_inode, adev->rmmio_size);
- adev->debugfs_regs[i] = ent;
- }
-
- return 0;
-}
-
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- }
-}
-
-static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int r = 0, i;
-
- /* hold on the scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_park(ring->sched.thread);
- }
-
- seq_printf(m, "run ib test:\n");
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- seq_printf(m, "ib ring tests failed (%d).\n", r);
- else
- seq_printf(m, "ib ring tests passed.\n");
-
- /* go on the scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_unpark(ring->sched.thread);
- }
-
- return 0;
-}
-
-static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
- {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
-};
-
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
-{
- return amdgpu_debugfs_add_files(adev,
- amdgpu_debugfs_test_ib_ring_list, 1);
-}
-
-int amdgpu_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
-
-static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- seq_write(m, adev->bios, adev->bios_size);
- return 0;
-}
-
-static const struct drm_info_list amdgpu_vbios_dump_list[] = {
- {"amdgpu_vbios",
- amdgpu_debugfs_get_vbios_dump,
- 0, NULL},
-};
-
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
-{
- return amdgpu_debugfs_add_files(adev,
- amdgpu_vbios_dump_list, 1);
-}
-#else
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 138beb550a58..38d47559f098 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -34,6 +34,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
@@ -556,15 +557,9 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return &amdgpu_fb->base;
}
-void amdgpu_output_poll_changed(struct drm_device *dev)
-{
- struct amdgpu_device *adev = dev->dev_private;
- amdgpu_fb_output_poll_changed(adev);
-}
-
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
.fb_create = amdgpu_user_framebuffer_create,
- .output_poll_changed = amdgpu_output_poll_changed
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
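
This hunk is part of the fbdev footprint reduction called out in the merge summary: the driver drops its bespoke output_poll_changed forwarder and points at the generic fb-helper. A sketch of the wiring, assuming the 4.16 DRM headers (kernel code, not a standalone program):

#include <drm/drm_fb_helper.h>
#include <drm/drm_mode_config.h>

/* the generic helper re-probes the fbdev emulation on hotplug, so a
 * per-driver wrapper like the one deleted above is no longer needed */
static const struct drm_mode_config_funcs example_mode_funcs = {
    .fb_create = NULL,  /* driver-specific, elided in this sketch */
    .output_poll_changed = drm_fb_helper_output_poll_changed,
};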
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 3cc0ef0c055e..0bcb6c6e0ca9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -25,9 +25,7 @@
struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd);
-
-void amdgpu_output_poll_changed(struct drm_device *dev);
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 56caaeee6fea..a8437a3296a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -360,6 +360,12 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
+#define amdgpu_dpm_notify_smu_memory_info(adev, virtual_addr_low, \
+ virtual_addr_hi, mc_addr_low, mc_addr_hi, size) \
+ ((adev)->powerplay.pp_funcs->notify_smu_memory_info)( \
+ (adev)->powerplay.pp_handle, virtual_addr_low, \
+ virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
+
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index c2f414ffb2cc..50afcf65181a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 10000;
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = -1; /* auto */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -165,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (> 0, default 10000)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -216,7 +217,7 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);
-MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty)");
module_param_named(dc_log, amdgpu_dc_log, int, 0444);
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
@@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -306,7 +310,6 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
-
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -566,12 +569,13 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
return 0;
}
+
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct drm_device *dev;
unsigned long flags = ent->driver_data;
- int ret;
+ int ret, retry = 0;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
DRM_INFO("This hardware requires experimental hardware support.\n"
@@ -604,8 +608,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+retry_init:
ret = drm_dev_register(dev, ent->driver_data);
- if (ret)
+ if (ret == -EAGAIN && ++retry <= 3) {
+ DRM_INFO("retry init %d\n", retry);
+ /* Don't request exclusive mode too frequently; the host may treat that as an attack */
+ msleep(5000);
+ goto retry_init;
+ } else if (ret)
goto err_pci;
return 0;
@@ -639,7 +649,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
- amdgpu_suspend(adev);
+ amdgpu_device_ip_suspend(adev);
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -844,9 +854,6 @@ static struct drm_driver kms_driver = {
.disable_vblank = amdgpu_disable_vblank_kms,
.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
.get_scanout_position = amdgpu_get_crtc_scanout_position,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = amdgpu_debugfs_init,
-#endif
.irq_preinstall = amdgpu_irq_preinstall,
.irq_postinstall = amdgpu_irq_postinstall,
.irq_uninstall = amdgpu_irq_uninstall,
@@ -906,10 +913,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
- r = amd_sched_fence_slab_init();
- if (r)
- goto error_sched;
-
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -922,9 +925,6 @@ static int __init amdgpu_init(void)
/* let modprobe override vga console setting */
return pci_register_driver(pdriver);
-error_sched:
- amdgpu_fence_slab_fini();
-
error_fence:
amdgpu_sync_fini();
@@ -938,7 +938,6 @@ static void __exit amdgpu_exit(void)
pci_unregister_driver(pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
- amd_sched_fence_slab_fini();
amdgpu_fence_slab_fini();
}
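(For reference: the gpu_recovery parameter added above and the retuned
lockup_timeout default can be overridden at load time, e.g.
"modprobe amdgpu gpu_recovery=1 lockup_timeout=10000"; the values here are
illustrative. Both parameters are registered with permission 0444, so they
are readable under /sys/module/amdgpu/parameters/ but cannot be changed at
runtime.)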
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 90fa8e8bc6fb..ff3e9beb7d19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -283,12 +283,6 @@ out:
return ret;
}
-void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
-{
- if (adev->mode_info.rfbdev)
- drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper);
-}
-
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
@@ -393,24 +387,3 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
return true;
return false;
}
-
-void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
-{
- struct amdgpu_fbdev *afbdev;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!adev)
- return;
-
- afbdev = adev->mode_info.rfbdev;
-
- if (!afbdev)
- return;
-
- fb_helper = &afbdev->helper;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (ret)
- DRM_DEBUG("failed to restore crtc mode\n");
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2fa95aef74d5..008e1984b7e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -187,7 +187,7 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
seq = ++ring->fence_drv.sync_seq;
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- seq, AMDGPU_FENCE_FLAG_INT);
+ seq, 0);
*s = seq;
@@ -391,9 +391,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
- dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
- "cpu addr 0x%p\n", ring->idx,
- ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+ dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
+ "cpu addr 0x%p\n", ring->idx,
+ ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
return 0;
}
@@ -410,7 +410,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
- long timeout;
int r;
/* Check that num_hw_submission is a power of two */
@@ -434,20 +433,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
- timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
- if (timeout == 0) {
- /*
- * FIXME:
- * Delayed workqueue cannot use it directly,
- * so the scheduler will not use delayed workqueue if
- * MAX_SCHEDULE_TIMEOUT is set.
- * Currently keep it simple and silly.
- */
- timeout = MAX_SCHEDULE_TIMEOUT;
- }
- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
- num_hw_submission,
- timeout, ring->name);
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+ num_hw_submission, amdgpu_job_hang_limit,
+ msecs_to_jiffies(amdgpu_lockup_timeout), ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
@@ -499,11 +487,11 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
- amdgpu_fence_driver_force_completion(adev);
+ amdgpu_fence_driver_force_completion(ring);
}
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- amd_sched_fini(&ring->sched);
+ drm_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
@@ -534,7 +522,7 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* delay GPU reset to resume */
- amdgpu_fence_driver_force_completion(adev);
+ amdgpu_fence_driver_force_completion(ring);
}
/* disable the interrupt */
@@ -571,30 +559,15 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
}
/**
- * amdgpu_fence_driver_force_completion - force all fence waiter to complete
+ * amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
- * @adev: amdgpu device pointer
+ * @ring: ring whose latest fence should be signaled
*
- * In case of GPU reset failure make sure no process keep waiting on fence
- * that will never complete.
*/
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
- int i;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->fence_drv.initialized)
- continue;
-
- amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
- }
-}
-
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
-{
- if (ring)
- amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+ amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+ amdgpu_fence_process(ring);
}
/*
@@ -709,25 +682,25 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
}
/**
- * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
+ * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
*
* Manually trigger a GPU reset and recovery.
*/
-static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
+static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- seq_printf(m, "gpu reset\n");
- amdgpu_gpu_reset(adev);
+ seq_printf(m, "gpu recover\n");
+ amdgpu_device_gpu_recover(adev, NULL, true);
return 0;
}
static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
- {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
+ {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};
static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index fe818501c520..0a4f34afaaaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -57,60 +57,48 @@
*/
/**
- * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
+ * amdgpu_dummy_page_init - init dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
- * Allocate system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
- * Returns 0 for success, -ENOMEM for failure.
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for GART entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
+static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- void *ptr;
-
- ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
- &adev->gart.table_addr);
- if (ptr == NULL) {
+ if (adev->dummy_page.page)
+ return 0;
+ adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+ if (adev->dummy_page.page == NULL)
+ return -ENOMEM;
+ adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
+ dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+ __free_page(adev->dummy_page.page);
+ adev->dummy_page.page = NULL;
return -ENOMEM;
}
-#ifdef CONFIG_X86
- if (0) {
- set_memory_uc((unsigned long)ptr,
- adev->gart.table_size >> PAGE_SHIFT);
- }
-#endif
- adev->gart.ptr = ptr;
- memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
return 0;
}
/**
- * amdgpu_gart_table_ram_free - free system ram for gart page table
+ * amdgpu_dummy_page_fini - free dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
- * Free system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
+ * Frees the dummy page used by the driver (all asics).
*/
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
+static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
- if (adev->gart.ptr == NULL) {
+ if (adev->dummy_page.page == NULL)
return;
- }
-#ifdef CONFIG_X86
- if (0) {
- set_memory_wb((unsigned long)adev->gart.ptr,
- adev->gart.table_size >> PAGE_SHIFT);
- }
-#endif
- pci_free_consistent(adev->pdev, adev->gart.table_size,
- (void *)adev->gart.ptr,
- adev->gart.table_addr);
- adev->gart.ptr = NULL;
- adev->gart.table_addr = 0;
+ pci_unmap_page(adev->pdev, adev->dummy_page.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ __free_page(adev->dummy_page.page);
+ adev->dummy_page.page = NULL;
}
/**
@@ -365,7 +353,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
- r = amdgpu_dummy_page_init(adev);
+ r = amdgpu_gart_dummy_page_init(adev);
if (r)
return r;
/* Compute table size */
@@ -377,10 +365,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
/* Allocate pages table */
adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
- if (adev->gart.pages == NULL) {
- amdgpu_gart_fini(adev);
+ if (adev->gart.pages == NULL)
return -ENOMEM;
- }
#endif
return 0;
@@ -395,14 +381,9 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
*/
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
- if (adev->gart.ready) {
- /* unbind pages */
- amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
- }
- adev->gart.ready = false;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
vfree(adev->gart.pages);
adev->gart.pages = NULL;
#endif
- amdgpu_dummy_page_fini(adev);
+ amdgpu_gart_dummy_page_fini(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index afbe803b1a13..d4a43302c2be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -39,7 +39,7 @@ struct amdgpu_gart_funcs;
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
struct amdgpu_gart {
- dma_addr_t table_addr;
+ u64 table_addr;
struct amdgpu_bo *robj;
void *ptr;
unsigned num_gpu_pages;
@@ -56,8 +56,6 @@ struct amdgpu_gart {
const struct amdgpu_gart_funcs *gart_funcs;
};
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e87eedcc0da9..e48b4ec88c8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -72,7 +72,7 @@ retry:
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
}
- DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+ DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
@@ -282,6 +282,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_userptr *args = data;
struct drm_gem_object *gobj;
@@ -335,7 +336,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto free_pages;
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
goto free_pages;
@@ -517,10 +518,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (!amdgpu_vm_ready(vm))
return;
- r = amdgpu_vm_update_directories(adev, vm);
- if (r)
- goto error;
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
goto error;
@@ -529,6 +526,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
operation == AMDGPU_VA_OP_REPLACE)
r = amdgpu_vm_bo_update(adev, bo_va, false);
+ r = amdgpu_vm_update_directories(adev, vm);
+ if (r)
+ goto error;
+
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
@@ -557,14 +558,25 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
- dev_err(&dev->pdev->dev,
+ dev_dbg(&dev->pdev->dev,
"va_address 0x%LX is in reserved area 0x%LX\n",
args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
+ if (args->va_address >= AMDGPU_VA_HOLE_START &&
+ args->va_address < AMDGPU_VA_HOLE_END) {
+ dev_dbg(&dev->pdev->dev,
+ "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
+ args->va_address, AMDGPU_VA_HOLE_START,
+ AMDGPU_VA_HOLE_END);
+ return -EINVAL;
+ }
+
+ args->va_address &= AMDGPU_VA_HOLE_MASK;
+
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
- dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+ dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
args->flags);
return -EINVAL;
}
@@ -576,7 +588,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
case AMDGPU_VA_OP_REPLACE:
break;
default:
- dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
args->operation);
return -EINVAL;
}
@@ -839,7 +851,7 @@ static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
};
#endif
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ef043361009f..bb40d2529a30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -203,7 +203,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+ r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
if (r)
return r;
@@ -229,7 +229,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq)
{
- amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+ amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_ring_fini(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 00e0ce10862f..e14ab34d8262 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -31,6 +31,11 @@ struct amdgpu_gtt_mgr {
atomic64_t available;
};
+struct amdgpu_gtt_node {
+ struct drm_mm_node node;
+ struct ttm_buffer_object *tbo;
+};
+
/**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
*
@@ -79,17 +84,17 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
}
/**
- * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
*
* @mem: the mem object to check
*
* Check if a mem object has already address space allocated.
*/
-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
{
- struct drm_mm_node *node = mem->mm_node;
+ struct amdgpu_gtt_node *node = mem->mm_node;
- return (node->start != AMDGPU_BO_INVALID_OFFSET);
+ return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
}
/**
@@ -109,12 +114,12 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node = mem->mm_node;
+ struct amdgpu_gtt_node *node = mem->mm_node;
enum drm_mm_insert_mode mode;
unsigned long fpfn, lpfn;
int r;
- if (amdgpu_gtt_mgr_is_allocated(mem))
+ if (amdgpu_gtt_mgr_has_gart_addr(mem))
return 0;
if (place)
@@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
mode = DRM_MM_INSERT_HIGH;
spin_lock(&mgr->lock);
- r = drm_mm_insert_node_in_range(&mgr->mm, node,
- mem->num_pages, mem->page_alignment, 0,
- fpfn, lpfn, mode);
+ r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+ mem->page_alignment, 0, fpfn, lpfn,
+ mode);
spin_unlock(&mgr->lock);
if (!r)
- mem->start = node->start;
+ mem->start = node->node.start;
return r;
}
@@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node;
+ struct amdgpu_gtt_node *node;
int r;
spin_lock(&mgr->lock);
@@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
goto err_out;
}
- node->start = AMDGPU_BO_INVALID_OFFSET;
- node->size = mem->num_pages;
+ node->node.start = AMDGPU_BO_INVALID_OFFSET;
+ node->node.size = mem->num_pages;
+ node->tbo = tbo;
mem->mm_node = node;
if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
goto err_out;
}
} else {
- mem->start = node->start;
+ mem->start = node->node.start;
}
return 0;
@@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node = mem->mm_node;
+ struct amdgpu_gtt_node *node = mem->mm_node;
if (!node)
return;
spin_lock(&mgr->lock);
- if (node->start != AMDGPU_BO_INVALID_OFFSET)
- drm_mm_remove_node(node);
+ if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
+ drm_mm_remove_node(&node->node);
spin_unlock(&mgr->lock);
atomic64_add(mem->num_pages, &mgr->available);
@@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
return (result > 0 ? result : 0) * PAGE_SIZE;
}
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_node *node;
+ struct drm_mm_node *mm_node;
+ int r = 0;
+
+ spin_lock(&mgr->lock);
+ drm_mm_for_each_node(mm_node, &mgr->mm) {
+ node = container_of(mm_node, struct amdgpu_gtt_node, node);
+ r = amdgpu_ttm_recover_gart(node->tbo);
+ if (r)
+ break;
+ }
+ spin_unlock(&mgr->lock);
+
+ return r;
+}
+
/**
* amdgpu_gtt_mgr_debug - dump GTT table
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 659997bfff30..a162d87ca0c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -149,7 +149,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- if (vm && !job->vm_id) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
@@ -164,7 +164,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
if (ring->funcs->emit_pipeline_sync && job &&
- ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+ ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true;
dma_fence_put(tmp);
@@ -211,7 +211,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
- amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+ amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
need_ctx_switch);
need_ctx_switch = false;
}
@@ -229,9 +229,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
r = amdgpu_fence_emit(ring, f);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
- if (job && job->vm_id)
- amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
- job->vm_id);
+ if (job && job->vmid)
+ amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
amdgpu_ring_undo(ring);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
new file mode 100644
index 000000000000..16884a0b677b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ids.h"
+
+#include <linux/idr.h>
+#include <linux/dma-fence-array.h>
+#include <drm/drmP.h>
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_pasid_ida);
+
+/**
+ * amdgpu_pasid_alloc - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_pasid_alloc(unsigned int bits)
+{
+ int pasid = -EINVAL;
+
+ for (bits = min(bits, 31U); bits > 0; bits--) {
+ pasid = ida_simple_get(&amdgpu_pasid_ida,
+ 1U << (bits - 1), 1U << bits,
+ GFP_KERNEL);
+ if (pasid != -ENOSPC)
+ break;
+ }
+
+ return pasid;
+}
+
+/**
+ * amdgpu_pasid_free - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_pasid_free(unsigned int pasid)
+{
+ ida_simple_remove(&amdgpu_pasid_ida, pasid);
+}
+
+/*
+ * VMID manager
+ *
+ * VMIDs are per-VMHUB identifiers used for page table handling.
+ */
+
+/**
+ * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if a GPU reset occurred since the last use of the VMID.
+ */
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id)
+{
+ return id->current_gpu_reset_count !=
+ atomic_read(&adev->gpu_reset_counter);
+}
+
+/* id_mgr->lock must be held */
+static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync,
+ struct dma_fence *fence,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct dma_fence *updates = sync->last_vm_update;
+ int r = 0;
+ struct dma_fence *flushed, *tmp;
+ bool needs_flush = vm->use_cpu_for_update;
+
+ flushed = id->flushed_updates;
+ if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
+ (atomic64_read(&id->owner) != vm->entity.fence_context) ||
+ (job->vm_pd_addr != id->pd_gpu_addr) ||
+ (updates && (!flushed || updates->context != flushed->context ||
+ dma_fence_is_later(updates, flushed))) ||
+ (!id->last_flush || (id->last_flush->context != fence_context &&
+ !dma_fence_is_signaled(id->last_flush)))) {
+ needs_flush = true;
+ /* prevent one context from being starved by another */
+ id->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&id->active, ring);
+ if (tmp) {
+ r = amdgpu_sync_fence(adev, sync, tmp, false);
+ return r;
+ }
+ }
+
+ /* Good, we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+ if (r)
+ goto out;
+
+ if (updates && (!flushed || updates->context != flushed->context ||
+ dma_fence_is_later(updates, flushed))) {
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ }
+ id->pd_gpu_addr = job->vm_pd_addr;
+ atomic64_set(&id->owner, vm->entity.fence_context);
+ job->vm_needs_flush = needs_flush;
+ if (needs_flush) {
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+ }
+ job->vmid = id - id_mgr->ids;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+ return r;
+}
+
+/**
+ * amdgpu_vmid_grab - allocate the next free VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job that will use the allocated VMID
+ *
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ */
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ struct dma_fence *updates = sync->last_vm_update;
+ struct amdgpu_vmid *id, *idle;
+ struct dma_fence **fences;
+ unsigned i;
+ int r = 0;
+
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub]) {
+ r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
+ mutex_unlock(&id_mgr->lock);
+ return r;
+ }
+ fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+ if (!fences) {
+ mutex_unlock(&id_mgr->lock);
+ return -ENOMEM;
+ }
+ /* Check if we have an idle VMID */
+ i = 0;
+ list_for_each_entry(idle, &id_mgr->ids_lru, list) {
+ fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+ if (!fences[i])
+ break;
+ ++i;
+ }
+
+ /* If we can't find an idle VMID to use, wait until one becomes available */
+ if (&idle->list == &id_mgr->ids_lru) {
+ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+ struct dma_fence_array *array;
+ unsigned j;
+
+ for (j = 0; j < i; ++j)
+ dma_fence_get(fences[j]);
+
+ array = dma_fence_array_create(i, fences, fence_context,
+ seqno, true);
+ if (!array) {
+ for (j = 0; j < i; ++j)
+ dma_fence_put(fences[j]);
+ kfree(fences);
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
+ dma_fence_put(&array->base);
+ if (r)
+ goto error;
+
+ mutex_unlock(&id_mgr->lock);
+ return 0;
+ }
+ kfree(fences);
+
+ job->vm_needs_flush = vm->use_cpu_for_update;
+ /* Check if we can use a VMID already assigned to this VM */
+ list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
+ struct dma_fence *flushed;
+ bool needs_flush = vm->use_cpu_for_update;
+
+ /* Check all the prerequisites to using this VMID */
+ if (amdgpu_vmid_had_gpu_reset(adev, id))
+ continue;
+
+ if (atomic64_read(&id->owner) != vm->entity.fence_context)
+ continue;
+
+ if (job->vm_pd_addr != id->pd_gpu_addr)
+ continue;
+
+ if (!id->last_flush ||
+ (id->last_flush->context != fence_context &&
+ !dma_fence_is_signaled(id->last_flush)))
+ needs_flush = true;
+
+ flushed = id->flushed_updates;
+ if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+ needs_flush = true;
+
+ /* Concurrent flushes are only possible starting with Vega10 */
+ if (adev->asic_type < CHIP_VEGA10 && needs_flush)
+ continue;
+
+ /* Good, we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+ if (r)
+ goto error;
+
+ if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ }
+
+ if (needs_flush)
+ goto needs_flush;
+ else
+ goto no_flush_needed;
+
+ }
+
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
+
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+ if (r)
+ goto error;
+
+ id->pd_gpu_addr = job->vm_pd_addr;
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ atomic64_set(&id->owner, vm->entity.fence_context);
+
+needs_flush:
+ job->vm_needs_flush = true;
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+
+no_flush_needed:
+ list_move_tail(&id->list, &id_mgr->ids_lru);
+
+ job->vmid = id - id_mgr->ids;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+
+error:
+ mutex_unlock(&id_mgr->lock);
+ return r;
+}
+
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub)
+{
+ struct amdgpu_vmid_mgr *id_mgr;
+ struct amdgpu_vmid *idle;
+ int r = 0;
+
+ id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
+ AMDGPU_VM_MAX_RESERVED_VMID) {
+ DRM_ERROR("Over limitation of reserved vmid\n");
+ atomic_dec(&id_mgr->reserved_vmid_num);
+ r = -EINVAL;
+ goto unlock;
+ }
+ /* Select the first idle VMID from the LRU list */
+ idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&idle->list);
+ vm->reserved_vmid[vmhub] = idle;
+ mutex_unlock(&id_mgr->lock);
+
+ return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
+}
+
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ atomic_dec(&id_mgr->reserved_vmid_num);
+ }
+ mutex_unlock(&id_mgr->lock);
+}
+
+/**
+ * amdgpu_vmid_reset - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ * @vmhub: vmhub to use
+ * @vmid: vmid number to use
+ *
+ * Reset saved GDS, GWS and OA to force switch on next flush.
+ */
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id = &id_mgr->ids[vmid];
+
+ atomic64_set(&id->owner, 0);
+ id->gds_base = 0;
+ id->gds_size = 0;
+ id->gws_base = 0;
+ id->gws_size = 0;
+ id->oa_base = 0;
+ id->oa_size = 0;
+}
+
+/**
+ * amdgpu_vmid_reset_all - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset VMID to force flush on next use
+ */
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ for (j = 1; j < id_mgr->num_ids; ++j)
+ amdgpu_vmid_reset(adev, i, j);
+ }
+}
+
+/**
+ * amdgpu_vmid_mgr_init - init the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ mutex_init(&id_mgr->lock);
+ INIT_LIST_HEAD(&id_mgr->ids_lru);
+ atomic_set(&id_mgr->reserved_vmid_num, 0);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (j = 1; j < id_mgr->num_ids; ++j) {
+ amdgpu_vmid_reset(adev, i, j);
+ amdgpu_sync_create(&id_mgr->ids[j].active);
+ list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+ }
+ }
+
+ adev->vm_manager.fence_context =
+ dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ adev->vm_manager.seqno[i] = 0;
+}
+
+/**
+ * amdgpu_vmid_mgr_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ mutex_destroy(&id_mgr->lock);
+ for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
+ struct amdgpu_vmid *id = &id_mgr->ids[j];
+
+ amdgpu_sync_free(&id->active);
+ dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
+ }
+ }
+}
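A minimal usage sketch for the PASID helpers above (illustrative only: apart
from amdgpu_pasid_alloc()/amdgpu_pasid_free(), the function names below are
hypothetical, and the vm->pasid field is assumed):

	/* Allocate a PASID up to 16 bits wide for a new VM and release it
	 * again on teardown. Hypothetical helpers, not part of this patch.
	 */
	static int example_vm_init_pasid(struct amdgpu_vm *vm)
	{
		int pasid = amdgpu_pasid_alloc(16);

		if (pasid < 0)
			return pasid;	/* -EINVAL, -ENOSPC or -ENOMEM */

		vm->pasid = pasid;
		return 0;
	}

	static void example_vm_fini_pasid(struct amdgpu_vm *vm)
	{
		if (vm->pasid)
			amdgpu_pasid_free(vm->pasid);
		vm->pasid = 0;
	}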
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
new file mode 100644
index 000000000000..ad931fa570b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_IDS_H__
+#define __AMDGPU_IDS_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/dma-fence.h>
+
+#include "amdgpu_sync.h"
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VMID 16
+
+struct amdgpu_device;
+struct amdgpu_vm;
+struct amdgpu_ring;
+struct amdgpu_sync;
+struct amdgpu_job;
+
+struct amdgpu_vmid {
+ struct list_head list;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ atomic64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct dma_fence *flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+};
+
+struct amdgpu_vmid_mgr {
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
+ atomic_t reserved_vmid_num;
+};
+
+int amdgpu_pasid_alloc(unsigned int bits);
+void amdgpu_pasid_free(unsigned int pasid);
+
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub);
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job);
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid);
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
+
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev);
+
+#endif
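A hedged sketch of how the reserved-VMID pair declared above is meant to be
used (illustrative only; hub index 0 is assumed to be the GFX hub, and error
handling is trimmed):

	/* Pin a dedicated VMID on hub 0 for one VM, then return it to the
	 * LRU list once the work that needs a stable VMID is done.
	 */
	static int example_reserve_gfx_vmid(struct amdgpu_device *adev,
					    struct amdgpu_vm *vm)
	{
		int r = amdgpu_vmid_alloc_reserved(adev, vm, 0);

		if (r)
			return r;	/* -EINVAL once the per-hub limit is hit */

		/* ... submit work that relies on the reserved VMID ... */

		amdgpu_vmid_free_reserved(adev, vm, 0);
		return 0;
	}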
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f5f27e4f0f7f..06373d44b3da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -92,15 +92,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
}
return 0;
} else {
- r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs);
+ r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs);
+ r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
if (r) {
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
return r;
}
@@ -133,8 +133,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
&adev->irq.ih.gpu_addr,
(void **)&adev->irq.ih.ring);
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
- amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index ada89358e220..29cf10927a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -105,8 +105,8 @@ struct amdgpu_iv_entry {
unsigned client_id;
unsigned src_id;
unsigned ring_id;
- unsigned vm_id;
- unsigned vm_id_src;
+ unsigned vmid;
+ unsigned vmid_src;
uint64_t timestamp;
unsigned timestamp_src;
unsigned pas_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 47c5ce9807db..56bcd59c3399 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
reset_work);
if (!amdgpu_sriov_vf(adev))
- amdgpu_gpu_reset(adev);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
/* Disable *all* interrupts */
@@ -232,7 +232,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
int ret = pci_enable_msi(adev->pdev);
if (!ret) {
adev->irq.msi_enabled = true;
- dev_info(adev->dev, "amdgpu: using MSI.\n");
+ dev_dbg(adev->dev, "amdgpu: using MSI.\n");
}
}
@@ -262,7 +262,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
return r;
}
- DRM_INFO("amdgpu: irq initialized.\n");
+ DRM_DEBUG("amdgpu: irq initialized.\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0cfc68db575b..2bd56760c744 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -37,10 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
atomic_read(&job->ring->fence_drv.last_seq),
job->ring->fence_drv.sync_seq);
- if (amdgpu_sriov_vf(job->adev))
- amdgpu_sriov_gpu_reset(job->adev, job);
- else
- amdgpu_gpu_reset(job->adev);
+ amdgpu_device_gpu_recover(job->adev, job, false);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +60,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->num_ibs = num_ibs;
amdgpu_sync_create(&(*job)->sync);
- amdgpu_sync_create(&(*job)->dep_sync);
amdgpu_sync_create(&(*job)->sched_sync);
(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
@@ -100,14 +96,13 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
+ amdgpu_ring_priority_put(job->ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
- amdgpu_sync_free(&job->dep_sync);
amdgpu_sync_free(&job->sched_sync);
kfree(job);
}
@@ -118,13 +113,12 @@ void amdgpu_job_free(struct amdgpu_job *job)
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
- amdgpu_sync_free(&job->dep_sync);
amdgpu_sync_free(&job->sched_sync);
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f)
{
int r;
@@ -133,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
@@ -141,46 +135,47 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring,
- amd_sched_get_job_priority(&job->base));
- amd_sched_entity_push_job(&job->base);
+ amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+ drm_sched_entity_push_job(&job->base, entity);
return 0;
}
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
-
- struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
+ bool explicit = false;
int r;
-
- if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
- r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
- if (r)
- DRM_ERROR("Error adding fence to sync (%d)\n", r);
+ struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+
+ if (fence && explicit) {
+ if (drm_sched_dependency_optimized(fence, s_entity)) {
+ r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+ if (r)
+ DRM_ERROR("Error adding fence to sync (%d)\n", r);
+ }
}
- if (!fence)
- fence = amdgpu_sync_get_fence(&job->sync);
- while (fence == NULL && vm && !job->vm_id) {
+
+ while (fence == NULL && vm && !job->vmid) {
struct amdgpu_ring *ring = job->ring;
- r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->finished,
- job);
+ r = amdgpu_vmid_grab(vm, ring, &job->sync,
+ &job->base.s_fence->finished,
+ job);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
- fence = amdgpu_sync_get_fence(&job->sync);
+ fence = amdgpu_sync_get_fence(&job->sync, NULL);
}
return fence;
}
-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence = NULL, *finished;
struct amdgpu_device *adev;
struct amdgpu_job *job;
int r;
@@ -190,15 +185,18 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return NULL;
}
job = to_amdgpu_job(sched_job);
+ finished = &job->base.s_fence->finished;
adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- /* skip ib schedule when vram is lost */
- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
- dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
- DRM_ERROR("Skip scheduling IBs!\n");
+
+ if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */
+
+ if (finished->error < 0) {
+ DRM_INFO("Skip scheduling IBs!\n");
} else {
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
&fence);
@@ -213,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return fence;
}
-const struct amd_sched_backend_ops amdgpu_sched_ops = {
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 720139e182a3..bd6e9a40f421 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -63,8 +63,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_acpi_fini(adev);
amdgpu_device_fini(adev);
@@ -159,9 +157,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- amdgpu_amdkfd_device_probe(adev);
- amdgpu_amdkfd_device_init(adev);
-
if (amdgpu_device_is_px(dev)) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -171,9 +166,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
pm_runtime_put_autosuspend(dev->dev);
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, true);
-
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -558,6 +550,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {};
+ uint64_t vm_size;
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
@@ -585,8 +578,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
if (amdgpu_sriov_vf(adev))
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
+
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
- dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ dev_info.virtual_address_max =
+ min(vm_size, AMDGPU_VA_HOLE_START);
+
+ vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ if (vm_size > AMDGPU_VA_HOLE_START) {
+ dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
+ dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
+ }
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
@@ -786,9 +788,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
*/
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
-
- amdgpu_fbdev_restore_mode(adev);
+ drm_fb_helper_lastclose(dev);
vga_switcheroo_process_delayed_switch();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index ffde1e9666e8..54f06c959340 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -89,7 +89,6 @@ enum amdgpu_hpd_id {
AMDGPU_HPD_4,
AMDGPU_HPD_5,
AMDGPU_HPD_6,
- AMDGPU_HPD_LAST,
AMDGPU_HPD_NONE = 0xff,
};
@@ -106,7 +105,6 @@ enum amdgpu_crtc_irq {
AMDGPU_CRTC_IRQ_VLINE4,
AMDGPU_CRTC_IRQ_VLINE5,
AMDGPU_CRTC_IRQ_VLINE6,
- AMDGPU_CRTC_IRQ_LAST,
AMDGPU_CRTC_IRQ_NONE = 0xff
};
@@ -117,7 +115,6 @@ enum amdgpu_pageflip_irq {
AMDGPU_PAGEFLIP_IRQ_D4,
AMDGPU_PAGEFLIP_IRQ_D5,
AMDGPU_PAGEFLIP_IRQ_D6,
- AMDGPU_PAGEFLIP_IRQ_LAST,
AMDGPU_PAGEFLIP_IRQ_NONE = 0xff
};
@@ -661,10 +658,6 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
-void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
-
-void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
-
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ea25164e7f4b..5c4c3e0d527b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -37,6 +37,18 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
+static bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ if (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
+ return false;
+
+ return true;
+}
+
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -281,6 +293,44 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL;
}
+/* Validate that the BO size fits within the total memory of the requested domain */
+static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+ unsigned long size, u32 domain)
+{
+ struct ttm_mem_type_manager *man = NULL;
+
+ /*
+ * If GTT is part of requested domains the check must succeed to
+ * allow fall back to GTT
+ */
+ if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+ man = &adev->mman.bdev.man[TTM_PL_TT];
+
+ if (size < (man->size << PAGE_SHIFT))
+ return true;
+ else
+ goto fail;
+ }
+
+ if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+ man = &adev->mman.bdev.man[TTM_PL_VRAM];
+
+ if (size < (man->size << PAGE_SHIFT))
+ return true;
+ else
+ goto fail;
+ }
+
+ /* TODO: add checks for more domains, such as AMDGPU_GEM_DOMAIN_CPU */
+ return true;
+
+fail:
+ DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+ man->size << PAGE_SHIFT);
+ return false;
+}
+
static int amdgpu_bo_do_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
@@ -289,16 +339,24 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = !kernel,
+ .no_wait_gpu = false,
+ .allow_reserved_eviction = true,
+ .resv = resv
+ };
struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align;
- u64 initial_bytes_moved, bytes_moved;
size_t acc_size;
int r;
page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
size = ALIGN(size, PAGE_SIZE);
+ if (!amdgpu_bo_validate_size(adev, size, domain))
+ return -ENOMEM;
+
if (kernel) {
type = ttm_bo_type_kernel;
} else if (sg) {
@@ -364,22 +422,19 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.bdev = &adev->mman.bdev;
amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- /* Kernel allocation are uninterruptible */
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, !kernel, NULL,
+ &bo->placement, page_align, &ctx, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;
- bytes_moved = atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
- amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+ ctx.bytes_moved);
else
- amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (kernel)
bo->tbo.priority = 1;
@@ -511,6 +566,7 @@ err:
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
uint32_t domain;
int r;
@@ -521,7 +577,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
goto retry;
@@ -632,6 +688,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 *gpu_addr)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -647,7 +704,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
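+ /* repinning is fine as long as the current placement overlaps the requested domains */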
- if (domain != amdgpu_mem_type_to_domain(mem_type))
+ if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
bo->pin_count++;
@@ -682,21 +739,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
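+ /* bind GTT placements to a GART address before the GPU offset is read */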
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (unlikely(r)) {
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto error;
+ }
+
bo->pin_count = 1;
- if (gpu_addr != NULL) {
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
+ if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
- }
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
@@ -717,6 +776,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (!bo->pin_count) {
@@ -730,7 +790,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
@@ -779,8 +839,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
adev->mc.aper_size);
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
- adev->mc.mc_vram_size >> 20,
- (unsigned long long)adev->mc.aper_size >> 20);
+ adev->mc.mc_vram_size >> 20,
+ (unsigned long long)adev->mc.aper_size >> 20);
DRM_INFO("RAM width %dbits %s\n",
adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
return amdgpu_ttm_init(adev);
@@ -902,6 +962,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo;
unsigned long offset, size;
int r;
@@ -935,7 +996,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
abo->placement.num_busy_placement = 1;
abo->placement.busy_placement = &abo->placements[1];
- r = ttm_bo_validate(bo, &abo->placement, false, false);
+ r = ttm_bo_validate(bo, &abo->placement, &ctx);
if (unlikely(r != 0))
return r;
@@ -980,7 +1041,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
- !amdgpu_ttm_is_bound(bo->tbo.ttm));
+ !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 428aae048f4b..33615e2ea2e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -187,7 +187,7 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
switch (bo->tbo.mem.mem_type) {
- case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
+ case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
case TTM_PL_VRAM: return true;
default: return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f8edf5483f11..01a996c6b802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -32,7 +32,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include "amd_powerplay.h"
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -1279,16 +1278,16 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
amdgpu_pm_compute_clocks(adev);
} else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = false;
mutex_unlock(&adev->pm.mutex);
@@ -1585,7 +1584,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *ddev = adev->ddev;
u32 flags = 0;
- amdgpu_get_clockgating_state(adev, &flags);
+ amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
amdgpu_parse_cg_state(m, flags);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 447d446b5015..2157d4509e84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -264,7 +264,7 @@ static int psp_hw_start(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
int ret;
- if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+ if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
ret = psp_bootloader_load_sysdrv(psp);
if (ret)
return ret;
@@ -334,23 +334,26 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
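+ /* on a SR-IOV GPU reset the PSP buffers already exist, so reuse them */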
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+ goto skip_memalloc;
+
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd)
return -ENOMEM;
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp->fw_pri_bo,
- &psp->fw_pri_mc_addr,
- &psp->fw_pri_buf);
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr,
+ &psp->fw_pri_buf);
if (ret)
goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->fence_buf_bo,
- &psp->fence_buf_mc_addr,
- &psp->fence_buf);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr,
+ &psp->fence_buf);
if (ret)
goto failed_mem2;
@@ -375,6 +378,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (ret)
goto failed_mem;
+skip_memalloc:
ret = psp_hw_start(psp);
if (ret)
goto failed_mem;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 93d86619e802..262c1267249e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -225,7 +225,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
/* Right now all IPs have only one instance - multiple rings. */
if (instance != 0) {
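+ /* reachable with user-supplied data, so log at debug level only */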
- DRM_ERROR("invalid ip instance: %d\n", instance);
+ DRM_DEBUG("invalid ip instance: %d\n", instance);
return -EINVAL;
}
@@ -255,13 +255,13 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
ip_num_rings = adev->vcn.num_enc_rings;
break;
default:
- DRM_ERROR("unknown ip type: %d\n", hw_ip);
+ DRM_DEBUG("unknown ip type: %d\n", hw_ip);
return -EINVAL;
}
if (ring >= ip_num_rings) {
- DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
- ring, ip_num_rings, hw_ip);
+ DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
+ ring, ip_num_rings, hw_ip);
return -EINVAL;
}
@@ -292,7 +292,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
default:
*out_ring = NULL;
r = -EINVAL;
- DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+ DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip);
}
out_unlock:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a98fbbb4739f..13044e66dcaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -164,7 +164,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Release a request for executing at @priority
*/
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
int i;
@@ -175,7 +175,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
return;
/* no need to restore if the job is already at the lowest priority */
- if (priority == AMD_SCHED_PRIORITY_NORMAL)
+ if (priority == DRM_SCHED_PRIORITY_NORMAL)
return;
mutex_lock(&ring->priority_mutex);
@@ -184,8 +184,8 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
goto out_unlock;
/* decay priority to the next level with a job available */
- for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
- if (i == AMD_SCHED_PRIORITY_NORMAL
+ for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ if (i == DRM_SCHED_PRIORITY_NORMAL
|| atomic_read(&ring->num_jobs[i])) {
ring->priority = i;
ring->funcs->set_priority(ring, i);
@@ -206,7 +206,7 @@ out_unlock:
* Request a ring's priority to be raised to @priority (refcounted).
*/
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
if (!ring->funcs->set_priority)
return;
@@ -263,25 +263,25 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_wb_get(adev, &ring->rptr_offs);
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->wptr_offs);
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->fence_offs);
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
if (r) {
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
return r;
@@ -317,12 +317,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
- ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+ ring->priority = DRM_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring);
- for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+ for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
if (amdgpu_debugfs_ring_init(adev, ring)) {
@@ -348,11 +348,11 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
- amdgpu_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index b18c2b96691f..102dad3edf6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -25,7 +25,7 @@
#define __AMDGPU_RING_H__
#include <drm/amdgpu_drm.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
/* max number of rings */
#define AMDGPU_MAX_RINGS 18
@@ -79,8 +79,7 @@ struct amdgpu_fence_driver {
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission);
@@ -122,11 +121,11 @@ struct amdgpu_ring_funcs {
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
+ unsigned vmid, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
@@ -155,14 +154,14 @@ struct amdgpu_ring_funcs {
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
};
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
+ struct drm_gpu_scheduler sched;
struct list_head lru_list;
struct amdgpu_bo *ring_obj;
@@ -187,6 +186,7 @@ struct amdgpu_ring {
uint64_t eop_gpu_addr;
u32 doorbell_index;
bool use_doorbell;
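+ /* poll the wptr from memory instead of a doorbell (e.g. under SR-IOV) */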
+ bool use_pollmem;
unsigned wptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
@@ -197,7 +197,7 @@ struct amdgpu_ring {
unsigned vm_inv_eng;
bool has_compute_vm_bug;
- atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
+ atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
@@ -213,9 +213,9 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 290cc3f9c433..86a0715d9431 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -29,29 +29,29 @@
#include "amdgpu_vm.h"
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
{
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
- return AMD_SCHED_PRIORITY_HIGH_HW;
+ return DRM_SCHED_PRIORITY_HIGH_HW;
case AMDGPU_CTX_PRIORITY_HIGH:
- return AMD_SCHED_PRIORITY_HIGH_SW;
+ return DRM_SCHED_PRIORITY_HIGH_SW;
case AMDGPU_CTX_PRIORITY_NORMAL:
- return AMD_SCHED_PRIORITY_NORMAL;
+ return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return AMD_SCHED_PRIORITY_LOW;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_UNSET:
- return AMD_SCHED_PRIORITY_UNSET;
+ return DRM_SCHED_PRIORITY_UNSET;
default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority);
- return AMD_SCHED_PRIORITY_INVALID;
+ return DRM_SCHED_PRIORITY_INVALID;
}
}
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
struct file *filp = fcheck(fd);
struct drm_file *file;
@@ -86,11 +86,11 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = dev->dev_private;
- enum amd_sched_priority priority;
+ enum drm_sched_priority priority;
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
- if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+ if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index b28c067d3822..2a1a0c734bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -27,7 +27,7 @@
#include <drm/drmP.h>
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a4bf21f8f1c1..df65c66dc956 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -35,6 +35,7 @@
struct amdgpu_sync_entry {
struct hlist_node node;
struct dma_fence *fence;
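+ /* set when the fence was added as an explicit (user space) dependency */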
+ bool explicit;
};
static struct kmem_cache *amdgpu_sync_slab;
@@ -63,7 +64,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence) {
struct amdgpu_ring *ring;
@@ -84,7 +85,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
*/
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence)
return s_fence->owner;
@@ -119,7 +120,7 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
{
struct amdgpu_sync_entry *e;
@@ -128,6 +129,10 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
continue;
amdgpu_sync_keep_later(&e->fence, f);
+
+ /* Preserve the explicit flag so we don't lose the pipeline sync */
+ e->explicit |= explicit;
+
return true;
}
return false;
@@ -141,24 +146,25 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
*
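+ * @explicit: true if the fence is an explicit user dependency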
*/
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f)
+ struct dma_fence *f, bool explicit)
{
struct amdgpu_sync_entry *e;
if (!f)
return 0;
-
if (amdgpu_sync_same_dev(adev, f) &&
amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
- if (amdgpu_sync_add_later(sync, f))
+ if (amdgpu_sync_add_later(sync, f, explicit))
return 0;
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!e)
return -ENOMEM;
+ e->explicit = explicit;
+
hash_add(sync->fences, &e->node, f->context);
e->fence = dma_fence_get(f);
return 0;
@@ -189,10 +195,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
/* always sync to the exclusive fence */
f = reservation_object_get_excl(resv);
- r = amdgpu_sync_fence(adev, sync, f);
-
- if (explicit_sync)
- return r;
+ r = amdgpu_sync_fence(adev, sync, f, false);
flist = reservation_object_get_list(resv);
if (!flist || r)
@@ -212,15 +215,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
(fence_owner == AMDGPU_FENCE_OWNER_VM)))
continue;
- /* Ignore fence from the same owner as
+ /* Ignore fences from the same owner and explicit ones as
* long as it isn't undefined.
*/
if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
- fence_owner == owner)
+ (fence_owner == owner || explicit_sync))
continue;
}
- r = amdgpu_sync_fence(adev, sync, f);
+ r = amdgpu_sync_fence(adev, sync, f, false);
if (r)
break;
}
@@ -245,7 +248,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct dma_fence *f = e->fence;
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
@@ -275,19 +278,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* amdgpu_sync_get_fence - get the next fence from the sync object
*
* @sync: sync object to use
+ * @explicit: true if the next fence is explicit
*
* Gets and removes the next fence from the sync object that is not yet signaled.
*/
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
struct dma_fence *f;
int i;
-
hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence;
+ if (explicit)
+ *explicit = e->explicit;
hash_del(&e->node);